Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-15 01:02:50 +00:00

Compare commits: e2e-pertur...finalizeBl (10 commits)
| Author | SHA1 | Date |
|---|---|---|
| | ed0c89eb76 | |
| | 87c2ee69fd | |
| | dc3327083e | |
| | 6a29aa2b7a | |
| | 8a39848d06 | |
| | f9a22483c0 | |
| | 5a4a56a781 | |
| | 3e006e6bc1 | |
| | ffb1fc32ea | |
| | a8f91f696a | |
.github/workflows/linkchecker.yml (vendored): 2 changes
@@ -7,6 +7,6 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - uses: actions/checkout@v2.3.4
-      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.13
+      - uses: gaurav-nelson/github-action-markdown-link-check@1.0.12
        with:
          folder-path: "docs"
.gitignore (vendored): 2 changes
@@ -15,7 +15,7 @@
 .vagrant
 .vendor-new/
 .vscode/
-abci/abci-cli
+abci-cli
 addrbook.json
 artifacts/*
 build/*
@@ -22,7 +22,6 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [config] \#6462 Move `PrivValidator` configuration out of `BaseConfig` into its own section. (@tychoish)
- [rpc] \#6610 Add MaxPeerBlockHeight into /status rpc call (@JayT106)
- [libs/CList] \#6626 Automatically detach the prev/next elements in Remove function (@JayT106)
- [fastsync/rpc] \#6620 Add TotalSyncedTime & RemainingTime to SyncInfo in /status RPC (@JayT106)

- Apps
- [ABCI] \#6408 Change the `key` and `value` fields from `[]byte` to `string` in the `EventAttribute` type. (@alexanderbez)

@@ -148,4 +147,3 @@ Friendly reminder: We have a [bug bounty program](https://hackerone.com/tendermi
- [rpc] \#6507 fix RPC client doesn't handle url's without ports (@JayT106)
- [statesync] \#6463 Adds Reverse Sync feature to fetch historical light blocks after state sync in order to verify any evidence (@cmwaters)
- [fastsync] \#6590 Update the metrics during fast-sync (@JayT106)
- [gitignore] \#6668 Fix gitignore of abci-cli (@tanyabouman)
Makefile: 2 changes
@@ -202,7 +202,7 @@ format:

 lint:
 	@echo "--> Running linter"
-	go run github.com/golangci/golangci-lint/cmd/golangci-lint run
+	@golangci-lint run
 .PHONY: lint

 DESTINATION = ./index.html.md
@@ -35,33 +35,29 @@ type Client interface {
 	FlushAsync(context.Context) (*ReqRes, error)
 	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
 	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
-	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
 	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
 	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
 	CommitAsync(context.Context) (*ReqRes, error)
 	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
-	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
-	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
 	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
 	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
 	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
 	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
+	FinalizeBlockAsync(context.Context, types.RequestFinalizeBlock) (*ReqRes, error)

 	// Synchronous requests
 	FlushSync(context.Context) error
 	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
 	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
-	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
 	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
 	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
 	CommitSync(context.Context) (*types.ResponseCommit, error)
 	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
-	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
-	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
 	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
 	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
 	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
 	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
+	FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
 }

 //----------------------------------------

@@ -194,16 +194,6 @@ func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo)
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
 }

-// NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
-	req := types.ToRequestDeliverTx(params)
-	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
-}
-
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
 	req := types.ToRequestCheckTx(params)

@@ -244,26 +234,6 @@ func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestI
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
 }

-// NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
-	req := types.ToRequestBeginBlock(params)
-	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
-}
-
-// NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
-	req := types.ToRequestEndBlock(params)
-	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
-}
-
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
 	req := types.ToRequestListSnapshots(params)

@@ -314,6 +284,22 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
 	)
 }

+func (cli *grpcClient) FinalizeBlockAsync(
+	ctx context.Context,
+	params types.RequestFinalizeBlock,
+) (*ReqRes, error) {
+	req := types.ToRequestFinalizeBlock(params)
+	res, err := cli.client.FinalizeBlock(ctx, req.GetFinalizeBlock(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(
+		ctx,
+		req,
+		&types.Response{Value: &types.Response_FinalizeBlock{FinalizeBlock: res}},
+	)
+}
+
 // finishAsyncCall creates a ReqRes for an async call, and immediately populates it
 // with the response. We don't complete it until it's been ordered via the channel.
 func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {

@@ -380,18 +366,6 @@ func (cli *grpcClient) InfoSync(
 	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
 }

-func (cli *grpcClient) DeliverTxSync(
-	ctx context.Context,
-	params types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
-
-	reqres, err := cli.DeliverTxAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
-}
-
 func (cli *grpcClient) CheckTxSync(
 	ctx context.Context,
 	params types.RequestCheckTx,

@@ -435,30 +409,6 @@ func (cli *grpcClient) InitChainSync(
 	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
 }

-func (cli *grpcClient) BeginBlockSync(
-	ctx context.Context,
-	params types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
-
-	reqres, err := cli.BeginBlockAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
-}
-
-func (cli *grpcClient) EndBlockSync(
-	ctx context.Context,
-	params types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
-
-	reqres, err := cli.EndBlockAsync(ctx, params)
-	if err != nil {
-		return nil, err
-	}
-	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
-}
-
 func (cli *grpcClient) ListSnapshotsSync(
 	ctx context.Context,
 	params types.RequestListSnapshots,

@@ -504,3 +454,14 @@ func (cli *grpcClient) ApplySnapshotChunkSync(
 	}
 	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
 }
+
+func (cli *grpcClient) FinalizeBlockSync(
+	ctx context.Context,
+	params types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+
+	reqres, err := cli.FinalizeBlockAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetFinalizeBlock(), cli.Error()
+}
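Taken together, the hunks above replace the client's per-block call sequence (BeginBlockSync, one DeliverTxSync per transaction, EndBlockSync) with a single FinalizeBlock round trip. A minimal sketch of the resulting calling pattern, assuming this branch's abcicli and types packages; the applyBlock helper itself is hypothetical and not part of the diff:

```go
package example

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// applyBlock executes every tx of a block with one FinalizeBlockSync call
// and then commits, replacing the old BeginBlock/DeliverTx/EndBlock trio.
func applyBlock(ctx context.Context, c abcicli.Client, txs [][]byte) error {
	res, err := c.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txs})
	if err != nil {
		return err
	}
	// One ResponseDeliverTx per submitted tx, in order.
	for i, tx := range res.Txs {
		if tx.IsErr() {
			return fmt.Errorf("tx %d failed with code %d: %s", i, tx.Code, tx.Log)
		}
	}
	_, err = c.CommitSync(ctx)
	return err
}
```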
@@ -77,17 +77,6 @@ func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*
 	), nil
 }

-func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.DeliverTx(params)
-	return app.callback(
-		types.ToRequestDeliverTx(params),
-		types.ToResponseDeliverTx(res),
-	), nil
-}
-
 func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -132,28 +121,6 @@ func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestIni
 	), nil
 }

-func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.BeginBlock(req)
-	return app.callback(
-		types.ToRequestBeginBlock(req),
-		types.ToResponseBeginBlock(res),
-	), nil
-}
-
-func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.EndBlock(req)
-	return app.callback(
-		types.ToRequestEndBlock(req),
-		types.ToResponseEndBlock(res),
-	), nil
-}
-
 func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()

@@ -204,6 +171,20 @@ func (app *localClient) ApplySnapshotChunkAsync(
 	), nil
 }

+func (app *localClient) FinalizeBlockAsync(
+	ctx context.Context,
+	req types.RequestFinalizeBlock,
+) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.FinalizeBlock(req)
+	return app.callback(
+		types.ToRequestFinalizeBlock(req),
+		types.ToResponseFinalizeBlock(res),
+	), nil
+}
+
 //-------------------------------------------------------

 func (app *localClient) FlushSync(ctx context.Context) error {

@@ -222,18 +203,6 @@ func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*t
 	return &res, nil
 }

-func (app *localClient) DeliverTxSync(
-	ctx context.Context,
-	req types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
-
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.DeliverTx(req)
-	return &res, nil
-}
-
 func (app *localClient) CheckTxSync(
 	ctx context.Context,
 	req types.RequestCheckTx,

@@ -276,30 +245,6 @@ func (app *localClient) InitChainSync(
 	return &res, nil
 }

-func (app *localClient) BeginBlockSync(
-	ctx context.Context,
-	req types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
-
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.BeginBlock(req)
-	return &res, nil
-}
-
-func (app *localClient) EndBlockSync(
-	ctx context.Context,
-	req types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
-
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-
-	res := app.Application.EndBlock(req)
-	return &res, nil
-}
-
 func (app *localClient) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,

@@ -346,6 +291,17 @@ func (app *localClient) ApplySnapshotChunkSync(
 	return &res, nil
 }

+func (app *localClient) FinalizeBlockSync(
+	ctx context.Context,
+	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.FinalizeBlock(req)
+	return &res, nil
+}
+
 //-------------------------------------------------------

 func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes {
@@ -1,4 +1,4 @@
-// Code generated by mockery 2.7.4. DO NOT EDIT.
+// Code generated by mockery 2.9.0. DO NOT EDIT.

 package mocks
@@ -65,52 +65,6 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
 	return r0, r1
 }

-// BeginBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// BeginBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseBeginBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseBeginBlock)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // CheckTxAsync provides a mock function with given fields: _a0, _a1
 func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)

@@ -203,52 +157,6 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
 	return r0, r1
 }

-// DeliverTxAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// DeliverTxSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseDeliverTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseDeliverTx)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // EchoAsync provides a mock function with given fields: ctx, msg
 func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) {
 	ret := _m.Called(ctx, msg)

@@ -295,52 +203,6 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
 	return r0, r1
 }

-// EndBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*abcicli.ReqRes)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
-// EndBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseEndBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseEndBlock)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // Error provides a mock function with given fields:
 func (_m *Client) Error() error {
 	ret := _m.Called()

@@ -355,6 +217,52 @@ func (_m *Client) Error() error {
 	return r0
 }

+// FinalizeBlockAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) FinalizeBlockAsync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*abcicli.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abcicli.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *abcicli.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abcicli.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseFinalizeBlock
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
 // FlushAsync provides a mock function with given fields: _a0
 func (_m *Client) FlushAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0)
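Consumers stub the regenerated mock in the usual testify way. A sketch, assuming the mocks package generated above and testify's mock/require helpers (the test name is hypothetical):

```go
package example_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/abci/types"
)

func TestFinalizeBlockStub(t *testing.T) {
	m := new(mocks.Client)
	// Stub the new method; the name must match the generated FinalizeBlockSync above.
	m.On("FinalizeBlockSync", mock.Anything, mock.Anything).
		Return(&types.ResponseFinalizeBlock{}, nil)

	res, err := m.FinalizeBlockSync(context.Background(), types.RequestFinalizeBlock{})
	require.NoError(t, err)
	require.NotNil(t, res)
	m.AssertExpectations(t)
}
```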
@@ -245,10 +245,6 @@ func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (
 	return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
 }

-func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
-}
-
 func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
 	return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
 }

@@ -265,14 +261,6 @@ func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestIn
 	return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
 }

-func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
-}
-
-func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
-}
-
 func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
 	return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
 }

@@ -295,6 +283,13 @@ func (cli *socketClient) ApplySnapshotChunkAsync(
 	return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
 }

+func (cli *socketClient) FinalizeBlockAsync(
+	ctx context.Context,
+	req types.RequestFinalizeBlock,
+) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, types.ToRequestFinalizeBlock(req))
+}
+
 //----------------------------------------

 func (cli *socketClient) FlushSync(ctx context.Context) error {

@@ -341,18 +336,6 @@ func (cli *socketClient) InfoSync(
 	return reqres.Response.GetInfo(), nil
 }

-func (cli *socketClient) DeliverTxSync(
-	ctx context.Context,
-	req types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
-
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
-	if err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetDeliverTx(), nil
-}
-
 func (cli *socketClient) CheckTxSync(
 	ctx context.Context,
 	req types.RequestCheckTx,

@@ -395,30 +378,6 @@ func (cli *socketClient) InitChainSync(
 	return reqres.Response.GetInitChain(), nil
 }

-func (cli *socketClient) BeginBlockSync(
-	ctx context.Context,
-	req types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
-
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
-	if err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetBeginBlock(), nil
-}
-
-func (cli *socketClient) EndBlockSync(
-	ctx context.Context,
-	req types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
-
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
-	if err != nil {
-		return nil, err
-	}
-	return reqres.Response.GetEndBlock(), nil
-}
-
 func (cli *socketClient) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,

@@ -465,6 +424,17 @@ func (cli *socketClient) ApplySnapshotChunkSync(
 	return reqres.Response.GetApplySnapshotChunk(), nil
 }

+func (cli *socketClient) FinalizeBlockSync(
+	ctx context.Context,
+	req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
+
+	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestFinalizeBlock(req))
+	if err != nil {
+		return nil, err
+	}
+	return reqres.Response.GetFinalizeBlock(), nil
+}
+
 //----------------------------------------

 // queueRequest enqueues req onto the queue. If the queue is full, it ether
@@ -569,8 +539,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Flush)
 	case *types.Request_Info:
 		_, ok = res.Value.(*types.Response_Info)
-	case *types.Request_DeliverTx:
-		_, ok = res.Value.(*types.Response_DeliverTx)
 	case *types.Request_CheckTx:
 		_, ok = res.Value.(*types.Response_CheckTx)
 	case *types.Request_Commit:

@@ -579,10 +547,6 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_Query)
 	case *types.Request_InitChain:
 		_, ok = res.Value.(*types.Response_InitChain)
-	case *types.Request_BeginBlock:
-		_, ok = res.Value.(*types.Response_BeginBlock)
-	case *types.Request_EndBlock:
-		_, ok = res.Value.(*types.Response_EndBlock)
 	case *types.Request_ApplySnapshotChunk:
 		_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
 	case *types.Request_LoadSnapshotChunk:

@@ -591,6 +555,8 @@ func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
 		_, ok = res.Value.(*types.Response_ListSnapshots)
 	case *types.Request_OfferSnapshot:
 		_, ok = res.Value.(*types.Response_OfferSnapshot)
+	case *types.Request_FinalizeBlock:
+		_, ok = res.Value.(*types.Response_FinalizeBlock)
 	}
 	return ok
 }
@@ -37,11 +37,11 @@ func TestProperSyncCalls(t *testing.T) {
 	resp := make(chan error, 1)
 	go func() {
 		// This is BeginBlockSync unrolled....
-		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
+		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
 		assert.NoError(t, err)
 		err = c.FlushSync(context.Background())
 		assert.NoError(t, err)
-		res := reqres.Response.GetBeginBlock()
+		res := reqres.Response.GetFinalizeBlock()
 		assert.NotNil(t, res)
 		resp <- c.Error()
 	}()

@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
 	resp := make(chan error, 1)
 	go func() {
 		// Start BeginBlock and flush it
-		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
+		reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{})
 		assert.NoError(t, err)
 		flush, err := c.FlushAsync(ctx)
 		assert.NoError(t, err)

@@ -84,7 +84,7 @@ func TestHangingSyncCalls(t *testing.T) {
 	err = s.Stop()
 	assert.NoError(t, err)

-	// wait for the response from BeginBlock
+	// wait for the response from FinalizeBlock
 	reqres.Wait()
 	flush.Wait()
 	resp <- c.Error()

@@ -121,7 +121,7 @@ type slowApp struct {
 	types.BaseApplication
 }

-func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+func (slowApp) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
 	time.Sleep(200 * time.Millisecond)
-	return types.ResponseBeginBlock{}
+	return types.ResponseFinalizeBlock{}
 }
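These tests exercise the asynchronous path: queue FinalizeBlock, flush the socket, then read the response off the completed ReqRes. A sketch of that pattern as a standalone helper, assuming this branch's abcicli and types packages (the finalizeAsync name is hypothetical):

```go
package example

import (
	"context"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// finalizeAsync queues a FinalizeBlock request, flushes so the server
// processes it, and returns the typed response from the ReqRes.
func finalizeAsync(ctx context.Context, c abcicli.Client, txs [][]byte) (*types.ResponseFinalizeBlock, error) {
	reqres, err := c.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: txs})
	if err != nil {
		return nil, err
	}
	if err := c.FlushSync(ctx); err != nil {
		return nil, err
	}
	return reqres.Response.GetFinalizeBlock(), nil
}
```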
@@ -504,16 +504,18 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
 	if err != nil {
 		return err
 	}
-	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
+	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
 	if err != nil {
 		return err
 	}
-	printResponse(cmd, args, response{
-		Code: res.Code,
-		Data: res.Data,
-		Info: res.Info,
-		Log:  res.Log,
-	})
+	for _, tx := range res.Txs {
+		printResponse(cmd, args, response{
+			Code: tx.Code,
+			Data: tx.Data,
+			Info: tx.Info,
+			Log:  tx.Log,
+		})
+	}
 	return nil
 }
@@ -24,24 +24,30 @@ func (app *Application) Info(req types.RequestInfo) types.ResponseInfo {
 	return types.ResponseInfo{Data: fmt.Sprintf("{\"hashes\":%v,\"txs\":%v}", app.hashCount, app.txCount)}
 }

-func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
 	if app.serial {
-		if len(req.Tx) > 8 {
-			return types.ResponseDeliverTx{
-				Code: code.CodeTypeEncodingError,
-				Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(req.Tx))}
-		}
-		tx8 := make([]byte, 8)
-		copy(tx8[len(tx8)-len(req.Tx):], req.Tx)
-		txValue := binary.BigEndian.Uint64(tx8)
-		if txValue != uint64(app.txCount) {
-			return types.ResponseDeliverTx{
-				Code: code.CodeTypeBadNonce,
-				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
+		for _, tx := range req.Txs {
+			if len(tx) > 8 {
+				return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{
+					Code: code.CodeTypeEncodingError,
+					Log:  fmt.Sprintf("Max tx size is 8 bytes, got %d", len(tx))}},
+				}
+			}
+			tx8 := make([]byte, 8)
+			copy(tx8[len(tx8)-len(tx):], tx)
+			txValue := binary.BigEndian.Uint64(tx8)
+			if txValue != uint64(app.txCount) {
+				return types.ResponseFinalizeBlock{
+					Txs: []*types.ResponseDeliverTx{{
+						Code: code.CodeTypeBadNonce,
+						Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
+				}
+			}
 		}
 	}
 	app.txCount++
-	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
+	return types.ResponseFinalizeBlock{Txs: []*types.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
 }

 func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
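For reference, a hedged sketch of exercising the counter app's serial nonce check through the new entry point. It assumes this branch's counter package and encodes each nonce as the 8-byte big-endian value the app decodes above:

```go
package main

import (
	"encoding/binary"
	"fmt"

	"github.com/tendermint/tendermint/abci/example/counter"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	// Serial mode makes the app enforce the strictly increasing nonce.
	app := counter.NewApplication(true)

	for nonce := uint64(0); nonce < 3; nonce++ {
		tx := make([]byte, 8)
		binary.BigEndian.PutUint64(tx, nonce) // max tx size the app accepts is 8 bytes
		res := app.FinalizeBlock(types.RequestFinalizeBlock{Txs: [][]byte{tx}})
		fmt.Println("tx", nonce, "code:", res.Txs[0].Code)
	}
}
```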
@@ -76,20 +76,22 @@ func testStream(t *testing.T, app types.Application) {
 	client.SetResponseCallback(func(req *types.Request, res *types.Response) {
 		// Process response
 		switch r := res.Value.(type) {
-		case *types.Response_DeliverTx:
-			counter++
-			if r.DeliverTx.Code != code.CodeTypeOK {
-				t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
-			}
-			if counter > numDeliverTxs {
-				t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
-			}
-			if counter == numDeliverTxs {
-				go func() {
-					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-					close(done)
-				}()
-				return
+		case *types.Response_FinalizeBlock:
+			for _, tx := range r.FinalizeBlock.Txs {
+				counter++
+				if tx.Code != code.CodeTypeOK {
+					t.Error("DeliverTx failed with ret_code", tx.Code)
+				}
+				if counter > numDeliverTxs {
+					t.Fatalf("Too many DeliverTx responses. Got %d, expected %d", counter, numDeliverTxs)
+				}
+				if counter == numDeliverTxs {
+					go func() {
+						time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+						close(done)
+					}()
+					return
+				}
 			}
 		case *types.Response_Flush:
 			// ignore

@@ -103,7 +105,8 @@ func testStream(t *testing.T, app types.Application) {
 	// Write requests
 	for counter := 0; counter < numDeliverTxs; counter++ {
 		// Send request
-		_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
+		tx := []byte("test")
+		_, err = client.FinalizeBlockAsync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
 		require.NoError(t, err)

 		// Sometimes send flush messages

@@ -163,22 +166,25 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
 	// Write requests
 	for counter := 0; counter < numDeliverTxs; counter++ {
 		// Send request
-		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
+		txt := []byte("test")
+		response, err := client.FinalizeBlock(context.Background(), &types.RequestFinalizeBlock{Txs: [][]byte{txt}})
 		if err != nil {
 			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
 		}
 		counter++
-		if response.Code != code.CodeTypeOK {
-			t.Error("DeliverTx failed with ret_code", response.Code)
-		}
-		if counter > numDeliverTxs {
-			t.Fatal("Too many DeliverTx responses")
-		}
-		t.Log("response", counter)
-		if counter == numDeliverTxs {
-			go func() {
-				time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
-			}()
+		for _, tx := range response.Txs {
+			if tx.Code != code.CodeTypeOK {
+				t.Error("DeliverTx failed with ret_code", tx.Code)
+			}
+			if counter > numDeliverTxs {
+				t.Fatal("Too many DeliverTx responses")
+			}
+			t.Log("response", counter)
+			if counter == numDeliverTxs {
+				go func() {
+					time.Sleep(time.Second * 1) // Wait for a bit to allow counter overflow
+				}()
+			}
 		}
 	}
@@ -86,35 +86,40 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
 }

 // tx is either "key=value" or just arbitrary bytes
-func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+func (app *Application) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
 	var key, value string
+	var txs = make([]*types.ResponseDeliverTx, len(req.Txs))

-	parts := bytes.Split(req.Tx, []byte("="))
-	if len(parts) == 2 {
-		key, value = string(parts[0]), string(parts[1])
-	} else {
-		key, value = string(req.Tx), string(req.Tx)
-	}
+	for i, tx := range req.Txs {
+		parts := bytes.Split(tx, []byte("="))
+		if len(parts) == 2 {
+			key, value = string(parts[0]), string(parts[1])
+		} else {
+			key, value = string(tx), string(tx)
+		}

-	err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
-	if err != nil {
-		panic(err)
-	}
-	app.state.Size++
+		err := app.state.db.Set(prefixKey([]byte(key)), []byte(value))
+		if err != nil {
+			panic(err)
+		}
+		app.state.Size++

-	events := []types.Event{
-		{
-			Type: "app",
-			Attributes: []types.EventAttribute{
-				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
-				{Key: "key", Value: key, Index: true},
-				{Key: "index_key", Value: "index is working", Index: true},
-				{Key: "noindex_key", Value: "index is working", Index: false},
-			},
-		},
-	}
+		events := []types.Event{
+			{
+				Type: "app",
+				Attributes: []types.EventAttribute{
+					{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
+					{Key: "key", Value: key, Index: true},
+					{Key: "index_key", Value: "index is working", Index: true},
+					{Key: "noindex_key", Value: "index is working", Index: false},
+				},
+			},
+		}

-	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
+		txs[i] = &types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
+	}
+
+	return types.ResponseFinalizeBlock{Txs: txs}
 }

 func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
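A short usage sketch against the rewritten kvstore app, assuming this branch's kvstore package: each element of Txs gets its own ResponseDeliverTx, with the "app" events attached per transaction.

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

func main() {
	app := kvstore.NewApplication()

	// Two txs in one block: "key=value" form and raw-bytes form.
	res := app.FinalizeBlock(types.RequestFinalizeBlock{
		Txs: [][]byte{[]byte("name=satoshi"), []byte("dahlia")},
	})
	for i, tx := range res.Txs {
		fmt.Printf("tx %d: code=%d events=%d\n", i, tx.Code, len(tx.Events))
	}
	app.Commit()
}
```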
@@ -27,12 +27,16 @@ const (
 var ctx = context.Background()

 func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
-	req := types.RequestDeliverTx{Tx: tx}
-	ar := app.DeliverTx(req)
-	require.False(t, ar.IsErr(), ar)
-	// repeating tx doesn't raise error
-	ar = app.DeliverTx(req)
-	require.False(t, ar.IsErr(), ar)
+	req := types.RequestFinalizeBlock{Txs: [][]byte{tx}}
+	ar := app.FinalizeBlock(req)
+	for _, tx := range ar.Txs {
+		require.False(t, tx.IsErr(), ar)
+	}
+	// repeating tx doesn't raise error
+	ar = app.FinalizeBlock(req)
+	for _, tx := range ar.Txs {
+		require.False(t, tx.IsErr(), ar)
+	}
 	// commit
 	app.Commit()

@@ -109,8 +113,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
 	header := tmproto.Header{
 		Height: height,
 	}
-	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
-	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
+	kvstore.FinalizeBlock(types.RequestFinalizeBlock{Hash: hash, Header: header})
 	kvstore.Commit()

 	resInfo = kvstore.Info(types.RequestInfo{})

@@ -200,16 +203,15 @@ func makeApplyBlock(
 		Height: height,
 	}

-	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
-	for _, tx := range txs {
-		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
-			t.Fatal(r)
-		}
-	}
-	resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
+	resFinalizeBlock := kvstore.FinalizeBlock(types.RequestFinalizeBlock{
+		Hash:   hash,
+		Header: header,
+		Txs:    txs,
+	})
+
 	kvstore.Commit()

-	valsEqual(t, diff, resEndBlock.ValidatorUpdates)
+	valsEqual(t, diff, resFinalizeBlock.ValidatorUpdates)

 }

@@ -326,13 +328,16 @@ func runClientTests(t *testing.T, client abcicli.Client) {
 }

 func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
-	ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
-	require.NoError(t, err)
-	require.False(t, ar.IsErr(), ar)
-	// repeating tx doesn't raise error
-	ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
-	require.NoError(t, err)
-	require.False(t, ar.IsErr(), ar)
+	ar, err := app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}})
+	require.NoError(t, err)
+	for _, tx := range ar.Txs {
+		require.False(t, tx.IsErr(), ar)
+	}
+	ar, err = app.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{tx}}) // repeating tx doesn't raise error
+	require.NoError(t, err)
+	for _, tx := range ar.Txs {
+		require.False(t, tx.IsErr(), ar)
+	}
 	// commit
 	_, err = app.CommitSync(ctx)
 	require.NoError(t, err)
@@ -66,19 +66,19 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
 	return res
 }

-// tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
-func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
-	// if it starts with "val:", update the validator set
-	// format is "val:pubkey!power"
-	if isValidatorTx(req.Tx) {
-		// update validators in the merkle tree
-		// and in app.ValUpdates
-		return app.execValidatorTx(req.Tx)
-	}
-
-	// otherwise, update the key-value store
-	return app.app.DeliverTx(req)
-}
+// // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
+// func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+// 	// if it starts with "val:", update the validator set
+// 	// format is "val:pubkey!power"
+// 	if isValidatorTx(req.Tx) {
+// 		// update validators in the merkle tree
+// 		// and in app.ValUpdates
+// 		return app.execValidatorTx(req.Tx)
+// 	}
+//
+// 	// otherwise, update the key-value store
+// 	return app.app.DeliverTx(req)
+// }

 func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
 	return app.app.CheckTx(req)

@@ -119,8 +119,40 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
 	return types.ResponseInitChain{}
 }

-// Track the block hash and header information
-func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+func (app *PersistentKVStoreApplication) ListSnapshots(
+	req types.RequestListSnapshots) types.ResponseListSnapshots {
+	return types.ResponseListSnapshots{}
+}
+
+func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
+	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
+	return types.ResponseLoadSnapshotChunk{}
+}
+
+func (app *PersistentKVStoreApplication) OfferSnapshot(
+	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
+	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
+}
+
+func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
+	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
+	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
+}
+
+func (app *PersistentKVStoreApplication) FinalizeBlock(
+	req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
+	// for i, tx := range req.Txs {
+	// 	// if it starts with "val:", update the validator set
+	// 	// format is "val:pubkey!power"
+	// 	if isValidatorTx(tx) {
+	// 		// update validators in the merkle tree
+	// 		// and in app.ValUpdates
+	// 		return app.execValidatorTx(req.Tx)
+	// 	}
+
+	// 	// otherwise, update the key-value store
+	// 	return app.app.DeliverTx(tx)
+	// }
+	// reset valset changes
 	app.ValUpdates = make([]types.ValidatorUpdate, 0)

@@ -142,32 +174,7 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
 		}
 	}

-	return types.ResponseBeginBlock{}
-}
-
-// Update the validator set
-func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
-	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
-}
-
-func (app *PersistentKVStoreApplication) ListSnapshots(
-	req types.RequestListSnapshots) types.ResponseListSnapshots {
-	return types.ResponseListSnapshots{}
-}
-
-func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
-	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
-	return types.ResponseLoadSnapshotChunk{}
-}
-
-func (app *PersistentKVStoreApplication) OfferSnapshot(
-	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
-	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
-}
-
-func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
-	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
-	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
+	return types.ResponseFinalizeBlock{ValidatorUpdates: app.ValUpdates}
 }

 //---------------------------------------------
@@ -200,9 +200,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 	case *types.Request_Info:
 		res := s.app.Info(*r.Info)
 		responses <- types.ToResponseInfo(res)
-	case *types.Request_DeliverTx:
-		res := s.app.DeliverTx(*r.DeliverTx)
-		responses <- types.ToResponseDeliverTx(res)
 	case *types.Request_CheckTx:
 		res := s.app.CheckTx(*r.CheckTx)
 		responses <- types.ToResponseCheckTx(res)

@@ -215,12 +212,6 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 	case *types.Request_InitChain:
 		res := s.app.InitChain(*r.InitChain)
 		responses <- types.ToResponseInitChain(res)
-	case *types.Request_BeginBlock:
-		res := s.app.BeginBlock(*r.BeginBlock)
-		responses <- types.ToResponseBeginBlock(res)
-	case *types.Request_EndBlock:
-		res := s.app.EndBlock(*r.EndBlock)
-		responses <- types.ToResponseEndBlock(res)
 	case *types.Request_ListSnapshots:
 		res := s.app.ListSnapshots(*r.ListSnapshots)
 		responses <- types.ToResponseListSnapshots(res)

@@ -233,6 +224,9 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types
 	case *types.Request_ApplySnapshotChunk:
 		res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
 		responses <- types.ToResponseApplySnapshotChunk(res)
+	case *types.Request_FinalizeBlock:
+		res := s.app.FinalizeBlock(*r.FinalizeBlock)
+		responses <- types.ToResponseFinalizeBlock(res)
 	default:
 		responses <- types.ToResponseException("Unknown request")
 	}
@@ -51,20 +51,22 @@ func Commit(client abcicli.Client, hashExp []byte) error {
 	return nil
 }

-func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
-	res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
-	code, data, log := res.Code, res.Data, res.Log
-	if code != codeExp {
-		fmt.Println("Failed test: DeliverTx")
-		fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
-			code, codeExp, log)
-		return errors.New("deliverTx error")
-	}
-	if !bytes.Equal(data, dataExp) {
-		fmt.Println("Failed test: DeliverTx")
-		fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
-			data, dataExp)
-		return errors.New("deliverTx error")
+func FinalizeBlock(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
+	res, _ := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
+	for _, tx := range res.Txs {
+		code, data, log := tx.Code, tx.Data, tx.Log
+		if code != codeExp {
+			fmt.Println("Failed test: DeliverTx")
+			fmt.Printf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v\n",
+				code, codeExp, log)
+			return errors.New("deliverTx error")
+		}
+		if !bytes.Equal(data, dataExp) {
+			fmt.Println("Failed test: DeliverTx")
+			fmt.Printf("DeliverTx response data was unexpected. Got %X expected %X\n",
+				data, dataExp)
+			return errors.New("deliverTx error")
+		}
 	}
 	fmt.Println("Passed test: DeliverTx")
 	return nil
@@ -37,16 +37,29 @@ func commit(client abcicli.Client, hashExp []byte) {
 	}
 }

-func deliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) {
-	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
+type tx struct {
+	Data    []byte
+	CodeExp uint32
+	DataExp []byte
+}
+
+func finalizeBlock(client abcicli.Client, txs []tx) {
+	var txsData = make([][]byte, len(txs))
+	for i, tx := range txs {
+		txsData[i] = tx.Data
+	}
+
+	res, err := client.FinalizeBlockSync(ctx, types.RequestFinalizeBlock{Txs: txsData})
 	if err != nil {
 		panicf("client error: %v", err)
 	}
-	if res.Code != codeExp {
-		panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", res.Code, codeExp, res.Log)
-	}
-	if !bytes.Equal(res.Data, dataExp) {
-		panicf("DeliverTx response data was unexpected. Got %X expected %X", res.Data, dataExp)
+	for i, tx := range res.Txs {
+		if tx.Code != txs[i].CodeExp {
+			panicf("DeliverTx response code was unexpected. Got %v expected %v. Log: %v", tx.Code, txs[i].CodeExp, tx.Log)
+		}
+		if !bytes.Equal(tx.Data, txs[i].DataExp) {
+			panicf("DeliverTx response data was unexpected. Got %X expected %X", tx.Data, txs[i].DataExp)
+		}
 	}
 }
@@ -81,13 +81,15 @@ func testCounter() {
 	// commit(client, nil)
 	// deliverTx(client, []byte("abc"), code.CodeTypeBadNonce, nil)
 	commit(client, nil)
-	deliverTx(client, []byte{0x00}, types.CodeTypeOK, nil)
+	finalizeBlock(client, []tx{{Data: []byte{0x00}, CodeExp: types.CodeTypeOK, DataExp: nil}})
 	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 1})
 	// deliverTx(client, []byte{0x00}, code.CodeTypeBadNonce, nil)
-	deliverTx(client, []byte{0x01}, types.CodeTypeOK, nil)
-	deliverTx(client, []byte{0x00, 0x02}, types.CodeTypeOK, nil)
-	deliverTx(client, []byte{0x00, 0x03}, types.CodeTypeOK, nil)
-	deliverTx(client, []byte{0x00, 0x00, 0x04}, types.CodeTypeOK, nil)
+	txs := []tx{
+		{Data: []byte{0x01}, DataExp: nil, CodeExp: types.CodeTypeOK},
+		{Data: []byte{0x00, 0x02}, DataExp: nil, CodeExp: types.CodeTypeOK},
+		{Data: []byte{0x00, 0x03}, DataExp: nil, CodeExp: types.CodeTypeOK},
+		{Data: []byte{0x00, 0x00, 0x04}, DataExp: nil, CodeExp: types.CodeTypeOK}}
+	finalizeBlock(client, txs)
 	// deliverTx(client, []byte{0x00, 0x00, 0x06}, code.CodeTypeBadNonce, nil)
 	commit(client, []byte{0, 0, 0, 0, 0, 0, 0, 5})
 }
@@ -17,11 +17,9 @@ type Application interface {
 	CheckTx(RequestCheckTx) ResponseCheckTx // Validate a tx for the mempool

 	// Consensus Connection
-	InitChain(RequestInitChain) ResponseInitChain    // Initialize blockchain w validators/other info from TendermintCore
-	BeginBlock(RequestBeginBlock) ResponseBeginBlock // Signals the beginning of a block
-	DeliverTx(RequestDeliverTx) ResponseDeliverTx    // Deliver a tx for full processing
-	EndBlock(RequestEndBlock) ResponseEndBlock       // Signals the end of a block, returns changes to the validator set
-	Commit() ResponseCommit                          // Commit the state and return the application Merkle root hash
+	InitChain(RequestInitChain) ResponseInitChain // Initialize blockchain w validators/other info from TendermintCore
+	FinalizeBlock(RequestFinalizeBlock) ResponseFinalizeBlock
+	Commit() ResponseCommit // Commit the state and return the application Merkle root hash

 	// State Sync Connection
 	ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots

@@ -46,10 +44,6 @@ func (BaseApplication) Info(req RequestInfo) ResponseInfo {
 	return ResponseInfo{}
 }

-func (BaseApplication) DeliverTx(req RequestDeliverTx) ResponseDeliverTx {
-	return ResponseDeliverTx{Code: CodeTypeOK}
-}
-
 func (BaseApplication) CheckTx(req RequestCheckTx) ResponseCheckTx {
 	return ResponseCheckTx{Code: CodeTypeOK}
 }

@@ -66,14 +60,6 @@ func (BaseApplication) InitChain(req RequestInitChain) ResponseInitChain {
 	return ResponseInitChain{}
 }

-func (BaseApplication) BeginBlock(req RequestBeginBlock) ResponseBeginBlock {
-	return ResponseBeginBlock{}
-}
-
-func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock {
-	return ResponseEndBlock{}
-}
-
 func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots {
 	return ResponseListSnapshots{}
 }

@@ -90,6 +76,10 @@ func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) Respons
 	return ResponseApplySnapshotChunk{}
 }

+func (BaseApplication) FinalizeBlock(req RequestFinalizeBlock) ResponseFinalizeBlock {
+	return ResponseFinalizeBlock{}
+}
+
 //-------------------------------------------------------

 // GRPCApplication is a GRPC wrapper for Application

@@ -114,11 +104,6 @@ func (app *GRPCApplication) Info(ctx context.Context, req *RequestInfo) (*Respon
 	return &res, nil
 }

-func (app *GRPCApplication) DeliverTx(ctx context.Context, req *RequestDeliverTx) (*ResponseDeliverTx, error) {
-	res := app.app.DeliverTx(*req)
-	return &res, nil
-}
-
 func (app *GRPCApplication) CheckTx(ctx context.Context, req *RequestCheckTx) (*ResponseCheckTx, error) {
 	res := app.app.CheckTx(*req)
 	return &res, nil

@@ -139,16 +124,6 @@ func (app *GRPCApplication) InitChain(ctx context.Context, req *RequestInitChain
 	return &res, nil
 }

-func (app *GRPCApplication) BeginBlock(ctx context.Context, req *RequestBeginBlock) (*ResponseBeginBlock, error) {
-	res := app.app.BeginBlock(*req)
-	return &res, nil
-}
-
-func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) {
-	res := app.app.EndBlock(*req)
-	return &res, nil
-}
-
 func (app *GRPCApplication) ListSnapshots(
 	ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) {
 	res := app.app.ListSnapshots(*req)

@@ -172,3 +147,9 @@ func (app *GRPCApplication) ApplySnapshotChunk(
 	res := app.app.ApplySnapshotChunk(*req)
 	return &res, nil
 }
+
+func (app *GRPCApplication) FinalizeBlock(
+	ctx context.Context, req *RequestFinalizeBlock) (*ResponseFinalizeBlock, error) {
+	res := app.app.FinalizeBlock(*req)
+	return &res, nil
+}
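With BeginBlock, DeliverTx, and EndBlock gone from both Application and BaseApplication, a custom application only needs to override FinalizeBlock. A minimal sketch under this branch's types package; TxCounter is a hypothetical example, not taken from the diff:

```go
package myapp

import (
	"github.com/tendermint/tendermint/abci/types"
)

// TxCounter embeds BaseApplication, which supplies no-op defaults for
// every other ABCI method under the reduced interface.
type TxCounter struct {
	types.BaseApplication
	n uint64
}

// FinalizeBlock counts txs and returns one OK ResponseDeliverTx per tx.
func (app *TxCounter) FinalizeBlock(req types.RequestFinalizeBlock) types.ResponseFinalizeBlock {
	txs := make([]*types.ResponseDeliverTx, len(req.Txs))
	for i := range req.Txs {
		app.n++
		txs[i] = &types.ResponseDeliverTx{Code: types.CodeTypeOK}
	}
	return types.ResponseFinalizeBlock{Txs: txs}
}
```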
@@ -48,12 +48,6 @@ func ToRequestInfo(req RequestInfo) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestDeliverTx(req RequestDeliverTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_DeliverTx{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestCheckTx(req RequestCheckTx) *Request {
|
||||
return &Request{
|
||||
Value: &Request_CheckTx{&req},
|
||||
@@ -78,18 +72,6 @@ func ToRequestInitChain(req RequestInitChain) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestBeginBlock(req RequestBeginBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_BeginBlock{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestEndBlock(req RequestEndBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_EndBlock{&req},
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestListSnapshots(req RequestListSnapshots) *Request {
|
||||
return &Request{
|
||||
Value: &Request_ListSnapshots{&req},
|
||||
@@ -114,6 +96,12 @@ func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request {
|
||||
}
|
||||
}
|
||||
|
||||
func ToRequestFinalizeBlock(req RequestFinalizeBlock) *Request {
|
||||
return &Request{
|
||||
Value: &Request_FinalizeBlock{&req},
|
||||
}
|
||||
}
|
||||
|
||||
//----------------------------------------
|
||||
|
||||
func ToResponseException(errStr string) *Response {
|
||||
@@ -139,11 +127,6 @@ func ToResponseInfo(res ResponseInfo) *Response {
|
||||
Value: &Response_Info{&res},
|
||||
}
|
||||
}
|
||||
func ToResponseDeliverTx(res ResponseDeliverTx) *Response {
|
||||
return &Response{
|
||||
Value: &Response_DeliverTx{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseCheckTx(res ResponseCheckTx) *Response {
|
||||
return &Response{
|
||||
@@ -169,18 +152,6 @@ func ToResponseInitChain(res ResponseInitChain) *Response {
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseBeginBlock(res ResponseBeginBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_BeginBlock{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseEndBlock(res ResponseEndBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_EndBlock{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseListSnapshots(res ResponseListSnapshots) *Response {
|
||||
return &Response{
|
||||
Value: &Response_ListSnapshots{&res},
|
||||
@@ -204,3 +175,9 @@ func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response {
|
||||
Value: &Response_ApplySnapshotChunk{&res},
|
||||
}
|
||||
}
|
||||
|
||||
func ToResponseFinalizeBlock(res ResponseFinalizeBlock) *Response {
|
||||
return &Response{
|
||||
Value: &Response_FinalizeBlock{&res},
|
||||
}
|
||||
}
|
||||
|
||||
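For readers tracking the ABCI surface change in the hunks above: with `DeliverTx`, `BeginBlock`, and `EndBlock` folded into `FinalizeBlock`, an application overrides a single method for block execution. The sketch below is illustrative and not part of this diff; `Counter` and its field are made-up names, and it assumes the `BaseApplication` defaults shown above.

```go
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// Counter embeds BaseApplication so every callback it does not override
// falls back to the no-op defaults, and it overrides only FinalizeBlock.
type Counter struct {
	abci.BaseApplication
	txCount int
}

func (app *Counter) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	// One ResponseDeliverTx per transaction in the block, in order.
	results := make([]*abci.ResponseDeliverTx, len(req.Txs))
	for i := range req.Txs {
		app.txCount++
		results[i] = &abci.ResponseDeliverTx{Code: abci.CodeTypeOK}
	}
	return abci.ResponseFinalizeBlock{Txs: results}
}
```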
File diff suppressed because it is too large
@@ -31,61 +31,24 @@ For example:

would be equal to the composite key of `jack.account.number`.

By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.

## Configuration

Operators can configure indexing via the `[tx_index]` section. The `indexer`
field takes a series of supported indexers. If `null` is included, indexing will
be turned off regardless of other values provided.
Let's take a look at the `[tx_index]` config section:

```toml
[tx-index]
##### transactions indexer configuration options #####
[tx_index]

# The backend database list to back the indexer.
# If the list contains null, no indexer service will be used.
#
# The application will set which txs to index. In some cases a node operator will be able
# to decide which txs to index based on configuration set in the application.
# What indexer to use for transactions
#
# Options:
#   1) "null"
#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
#      - When "kv" is chosen "tx.height" and "tx.hash" will always be indexed.
#   3) "psql" - the indexer services backed by PostgreSQL.
# indexer = []
indexer = "kv"
```

### Supported Indexers
By default, Tendermint will index all transactions by their respective hashes
and height and blocks by their height.

#### KV

The `kv` indexer type is an embedded key-value store supported by the main
underlying Tendermint database. Using the `kv` indexer type allows you to query
for block and transaction events directly against Tendermint's RPC. However, the
query syntax is limited and so this indexer type might be deprecated or removed
entirely in the future.
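Since `tx.height` and `tx.hash` are always indexed under `kv`, a node can answer simple event queries over RPC. A minimal sketch follows; the endpoint address is an assumption, and the exact `rpchttp.New` constructor signature varies between Tendermint releases, so treat this as illustrative rather than authoritative.

```go
package main

import (
	"context"
	"fmt"

	rpchttp "github.com/tendermint/tendermint/rpc/client/http"
)

func main() {
	// Address of a locally running node (an assumption for this sketch).
	c, err := rpchttp.New("http://localhost:26657")
	if err != nil {
		panic(err)
	}
	page, perPage := 1, 30
	// tx.height is always indexed when the kv indexer is enabled.
	res, err := c.TxSearch(context.Background(), "tx.height=5", false, &page, &perPage, "asc")
	if err != nil {
		panic(err)
	}
	fmt.Println("txs found at height 5:", res.TotalCount)
}
```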
#### PostgreSQL

The `psql` indexer type allows an operator to enable block and transaction event
indexing by proxying it to an external PostgreSQL instance, allowing the events
to be stored in relational models. Since the events are stored in an RDBMS, operators
can leverage SQL to perform a series of rich and complex queries that are not
supported by the `kv` indexer type. Since operators can leverage SQL directly,
searching is not enabled for the `psql` indexer type via Tendermint's RPC -- any
such query will fail.

Note, the SQL schema is stored in `state/indexer/sink/psql/schema.sql` and operators
must explicitly create the relations prior to starting Tendermint and enabling
the `psql` indexer type.

Example:

```shell
$ psql ... -f state/indexer/sink/psql/schema.sql
```
You can turn off indexing completely by setting `tx_index` to `null`.
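Once the schema is loaded and a node is writing events, the database can be queried with any SQL client. Below is a minimal Go sketch using the `lib/pq` driver already present in `go.mod`; the connection string and the table/column names are illustrative, and the authoritative layout is whatever `schema.sql` defines.

```go
package main

import (
	"database/sql"
	"fmt"

	_ "github.com/lib/pq" // registers the "postgres" driver
)

func main() {
	// The connection string is an assumption for this sketch.
	db, err := sql.Open("postgres", "postgres://tm:tm@localhost/tendermint?sslmode=disable")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Table and column names are illustrative; consult state/indexer/sink/psql/schema.sql.
	rows, err := db.Query(`SELECT height FROM blocks ORDER BY height DESC LIMIT 10`)
	if err != nil {
		panic(err)
	}
	defer rows.Close()
	for rows.Next() {
		var height int64
		if err := rows.Scan(&height); err != nil {
			panic(err)
		}
		fmt.Println("indexed height:", height)
	}
}
```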
## Default Indexes

@@ -268,8 +268,6 @@ While we do not favor any operating system, more secure and stable Linux server
distributions (like CentOS) should be preferred over desktop operating systems
(like Mac OS).

Native Windows support is not provided. If you are using a Windows machine, you can try using the [bash shell](https://docs.microsoft.com/en-us/windows/wsl/install-win10).

### Miscellaneous

NOTE: if you are going to use Tendermint in a public domain, make sure
22
go.mod
@@ -10,47 +10,33 @@ require (
	github.com/btcsuite/btcd v0.22.0-beta
	github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
	github.com/fortytw2/leaktest v1.3.0
	github.com/go-kit/kit v0.11.0
	github.com/go-lintpack/lintpack v0.5.2 // indirect
	github.com/go-kit/kit v0.10.0
	github.com/gogo/protobuf v1.3.2
	github.com/golang/protobuf v1.5.2
	github.com/golangci/errcheck v0.0.0-20181223084120-ef45e06d44b6 // indirect
	github.com/golangci/go-tools v0.0.0-20190318055746-e32c54105b7c // indirect
	github.com/golangci/goconst v0.0.0-20180610141641-041c5f2b40f3 // indirect
	github.com/golangci/gocyclo v0.0.0-20180528134321-2becd97e67ee // indirect
	github.com/golangci/golangci-lint v1.38.0 // indirect
	github.com/golangci/gosec v0.0.0-20190211064107-66fb7fc33547 // indirect
	github.com/golangci/ineffassign v0.0.0-20190609212857-42439a7714cc // indirect
	github.com/golangci/prealloc v0.0.0-20180630174525-215b22d4de21 // indirect
	github.com/google/orderedcode v0.0.1
	github.com/google/uuid v1.2.0
	github.com/gorilla/websocket v1.4.2
	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0
	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
	github.com/klauspost/cpuid v1.2.0 // indirect
	github.com/lib/pq v1.10.2
	github.com/libp2p/go-buffer-pool v0.0.2
	github.com/minio/highwayhash v1.0.2
	github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e // indirect
	github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
	github.com/ory/dockertest v3.3.5+incompatible
	github.com/pkg/errors v0.9.1
	github.com/prometheus/client_golang v1.11.0
	github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
	github.com/rs/cors v1.8.0
	github.com/rs/zerolog v1.23.0
	github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
	github.com/shirou/gopsutil v0.0.0-20180427012116-c95755e4bcd7 // indirect
	github.com/shirou/w32 v0.0.0-20160930032740-bb4de0191aa4 // indirect
	github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
	github.com/spf13/cobra v1.2.1
	github.com/spf13/cobra v1.2.0
	github.com/spf13/viper v1.8.1
	github.com/stretchr/testify v1.7.0
	github.com/tendermint/tm-db v0.6.4
	golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b
	golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad
	golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4
	google.golang.org/grpc v1.39.0
	gopkg.in/airbrake/gobrake.v2 v2.0.9 // indirect
	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
	gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2 // indirect
	sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4 // indirect
)
@@ -83,10 +83,6 @@ type BlockPool struct {

	requestsCh chan<- BlockRequest
	errorsCh   chan<- peerError

	startHeight               int64
	lastHundredBlockTimeStamp time.Time
	lastSyncRate              float64
}

// NewBlockPool returns a new BlockPool with the height equal to start. Block

@@ -95,14 +91,12 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
	bp := &BlockPool{
		peers: make(map[types.NodeID]*bpPeer),

		requesters:  make(map[int64]*bpRequester),
		height:      start,
		startHeight: start,
		numPending:  0,
		requesters: make(map[int64]*bpRequester),
		height:     start,
		numPending: 0,

		requestsCh:   requestsCh,
		errorsCh:     errorsCh,
		lastSyncRate: 0,
		requestsCh: requestsCh,
		errorsCh:   errorsCh,
	}
	bp.BaseService = *service.NewBaseService(nil, "BlockPool", bp)
	return bp

@@ -112,7 +106,6 @@ func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- p
// pool's start time.
func (pool *BlockPool) OnStart() error {
	pool.lastAdvance = time.Now()
	pool.lastHundredBlockTimeStamp = pool.lastAdvance
	go pool.makeRequestersRoutine()
	return nil
}

@@ -223,19 +216,6 @@ func (pool *BlockPool) PopRequest() {
		delete(pool.requesters, pool.height)
		pool.height++
		pool.lastAdvance = time.Now()

		// the lastSyncRate will be updated every 100 blocks, it uses the adaptive filter
		// to smooth the block sync rate and the unit represents the number of blocks per second.
		if (pool.height-pool.startHeight)%100 == 0 {
			newSyncRate := 100 / time.Since(pool.lastHundredBlockTimeStamp).Seconds()
			if pool.lastSyncRate == 0 {
				pool.lastSyncRate = newSyncRate
			} else {
				pool.lastSyncRate = 0.9*pool.lastSyncRate + 0.1*newSyncRate
			}
			pool.lastHundredBlockTimeStamp = time.Now()
		}

	} else {
		panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height))
	}

@@ -448,20 +428,6 @@ func (pool *BlockPool) debug() string {
	return str
}

func (pool *BlockPool) targetSyncBlocks() int64 {
	pool.mtx.RLock()
	defer pool.mtx.RUnlock()

	return pool.maxPeerHeight - pool.startHeight + 1
}

func (pool *BlockPool) getLastSyncRate() float64 {
	pool.mtx.RLock()
	defer pool.mtx.RUnlock()

	return pool.lastSyncRate
}
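The comment in `PopRequest` above describes the smoothing only briefly; the standalone sketch below restates the same adaptive filter (an exponential moving average with a 0.9/0.1 split) outside the pool, purely for illustration. `updateSyncRate` is a made-up name.

```go
package main

import (
	"fmt"
	"time"
)

// updateSyncRate folds the rate observed over the latest window of blocks
// into the running estimate, as PopRequest does every 100 blocks: the
// first sample seeds the filter, later samples are blended 90/10 so that
// one-off spikes are damped.
func updateSyncRate(last float64, blocks int, window time.Duration) float64 {
	newRate := float64(blocks) / window.Seconds()
	if last == 0 {
		return newRate
	}
	return 0.9*last + 0.1*newRate
}

func main() {
	// 100 blocks in 4s is 25 blocks/s; blended with a prior rate of 20:
	// 0.9*20 + 0.1*25 = 20.5 blocks/s.
	fmt.Println(updateSyncRate(20, 100, 4*time.Second))
}
```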
//-------------------------------------

type bpPeer struct {

@@ -2,7 +2,6 @@ package v0

import (
	"fmt"
	"runtime/debug"
	"sync"
	"time"

@@ -11,7 +10,6 @@ import (
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/service"
	tmSync "github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/store"

@@ -74,7 +72,7 @@ func (e peerError) Error() string {
	return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error())
}

// Reactor handles long-term catchup syncing.
// BlockchainReactor handles long-term catchup syncing.
type Reactor struct {
	service.BaseService

@@ -85,19 +83,12 @@ type Reactor struct {
	store       *store.BlockStore
	pool        *BlockPool
	consReactor consensusReactor
	fastSync    *tmSync.AtomicBool
	fastSync    bool

	blockchainCh *p2p.Channel
	// blockchainOutBridgeCh defines a channel that acts as a bridge between sending Envelope
	// messages that the reactor will consume in processBlockchainCh and receiving messages
	// from the peer updates channel and other goroutines. We do this instead of directly
	// sending on blockchainCh.Out to avoid race conditions in the case where other goroutines
	// send Envelopes directly to the blockchainCh.Out channel, since processBlockchainCh
	// may close the blockchainCh.Out channel at the same time that other goroutines send to
	// blockchainCh.Out.
	blockchainOutBridgeCh chan p2p.Envelope
	peerUpdates           *p2p.PeerUpdates
	closeCh               chan struct{}
	blockchainCh  *p2p.Channel
	peerUpdates   *p2p.PeerUpdates
	peerUpdatesCh chan p2p.Envelope
	closeCh       chan struct{}

	requestsCh <-chan BlockRequest
	errorsCh   <-chan peerError

@@ -108,8 +99,6 @@ type Reactor struct {
	poolWG sync.WaitGroup

	metrics *cons.Metrics

	syncStartTime time.Time
}

// NewReactor returns new reactor instance.

@@ -137,20 +126,19 @@ func NewReactor(
	errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

	r := &Reactor{
		initialState:          state,
		blockExec:             blockExec,
		store:                 store,
		pool:                  NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:           consReactor,
		fastSync:              tmSync.NewBool(fastSync),
		requestsCh:            requestsCh,
		errorsCh:              errorsCh,
		blockchainCh:          blockchainCh,
		blockchainOutBridgeCh: make(chan p2p.Envelope),
		peerUpdates:           peerUpdates,
		closeCh:               make(chan struct{}),
		metrics:               metrics,
		syncStartTime:         time.Time{},
		initialState:  state,
		blockExec:     blockExec,
		store:         store,
		pool:          NewBlockPool(startHeight, requestsCh, errorsCh),
		consReactor:   consReactor,
		fastSync:      fastSync,
		requestsCh:    requestsCh,
		errorsCh:      errorsCh,
		blockchainCh:  blockchainCh,
		peerUpdates:   peerUpdates,
		peerUpdatesCh: make(chan p2p.Envelope),
		closeCh:       make(chan struct{}),
		metrics:       metrics,
	}

	r.BaseService = *service.NewBaseService(logger, "Blockchain", r)

@@ -165,7 +153,7 @@ func NewReactor(
// If fastSync is enabled, we also start the pool and the pool processing
// goroutine. If the pool fails to start, an error is returned.
func (r *Reactor) OnStart() error {
	if r.fastSync.IsSet() {
	if r.fastSync {
		if err := r.pool.Start(); err != nil {
			return err
		}

@@ -183,7 +171,7 @@ func (r *Reactor) OnStart() error {
// OnStop stops the reactor by signaling to all spawned goroutines to exit and
// blocking until they all exit.
func (r *Reactor) OnStop() {
	if r.fastSync.IsSet() {
	if r.fastSync {
		if err := r.pool.Stop(); err != nil {
			r.Logger.Error("failed to stop pool", "err", err)
		}

@@ -277,11 +265,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from processing message panic", "err", err)
		}
	}()

@@ -299,7 +283,7 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
}

// processBlockchainCh initiates a blocking process where we listen for and handle
// envelopes on the BlockchainChannel and blockchainOutBridgeCh. Any error encountered during
// envelopes on the BlockchainChannel and peerUpdatesCh. Any error encountered during
// message execution will result in a PeerError being sent on the BlockchainChannel.
// When the reactor is stopped, we will catch the signal and close the p2p Channel
// gracefully.

@@ -317,8 +301,8 @@ func (r *Reactor) processBlockchainCh() {
			}
		}

		case envelope := <-r.blockchainOutBridgeCh:
			r.blockchainCh.Out <- envelope
		case envelop := <-r.peerUpdatesCh:
			r.blockchainCh.Out <- envelop

		case <-r.closeCh:
			r.Logger.Debug("stopped listening on blockchain channel; closing...")

@@ -340,7 +324,7 @@ func (r *Reactor) processPeerUpdate(peerUpdate p2p.PeerUpdate) {
	switch peerUpdate.Status {
	case p2p.PeerStatusUp:
		// send a status update to the newly added peer
		r.blockchainOutBridgeCh <- p2p.Envelope{
		r.peerUpdatesCh <- p2p.Envelope{
			To: peerUpdate.NodeID,
			Message: &bcproto.StatusResponse{
				Base: r.store.Base(),

@@ -374,7 +358,7 @@ func (r *Reactor) processPeerUpdates() {
// SwitchToFastSync is called by the state sync reactor when switching to fast
// sync.
func (r *Reactor) SwitchToFastSync(state sm.State) error {
	r.fastSync.Set()
	r.fastSync = true
	r.initialState = state
	r.pool.height = state.LastBlockHeight + 1

@@ -382,8 +366,6 @@ func (r *Reactor) SwitchToFastSync(state sm.State) error {
		return err
	}

	r.syncStartTime = time.Now()

	r.poolWG.Add(1)
	go r.poolRoutine(true)

@@ -406,7 +388,7 @@ func (r *Reactor) requestRoutine() {
			return

		case request := <-r.requestsCh:
			r.blockchainOutBridgeCh <- p2p.Envelope{
			r.blockchainCh.Out <- p2p.Envelope{
				To:      request.PeerID,
				Message: &bcproto.BlockRequest{Height: request.Height},
			}

@@ -423,7 +405,7 @@ func (r *Reactor) requestRoutine() {
			go func() {
				defer r.poolWG.Done()

				r.blockchainOutBridgeCh <- p2p.Envelope{
				r.blockchainCh.Out <- p2p.Envelope{
					Broadcast: true,
					Message:   &bcproto.StatusRequest{},
				}

@@ -496,8 +478,6 @@ FOR_LOOP:
				r.Logger.Error("failed to stop pool", "err", err)
			}

			r.fastSync.UnSet()

			if r.consReactor != nil {
				r.consReactor.SwitchToConsensus(state, blocksSynced > 0 || stateSynced)
			}

@@ -612,27 +592,3 @@ FOR_LOOP:
func (r *Reactor) GetMaxPeerBlockHeight() int64 {
	return r.pool.MaxPeerHeight()
}

func (r *Reactor) GetTotalSyncedTime() time.Duration {
	if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *Reactor) GetRemainingSyncTime() time.Duration {
	if !r.fastSync.IsSet() {
		return time.Duration(0)
	}

	targetSyncs := r.pool.targetSyncBlocks()
	currentSyncs := r.store.Height() - r.pool.startHeight + 1
	lastSyncRate := r.pool.getLastSyncRate()
	if currentSyncs < 0 || lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
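To make `GetRemainingSyncTime` concrete, the sketch below isolates its arithmetic: blocks left divided by the smoothed rate, with the same guard against a near-zero rate. The function and its numbers are illustrative only.

```go
package main

import (
	"fmt"
	"time"
)

// remaining mirrors GetRemainingSyncTime above: (target - current) blocks
// divided by the smoothed blocks-per-second rate. A rate below 0.001
// means the filter never updated (fewer than ~100 blocks synced), so the
// estimate is reported as zero.
func remaining(target, current int64, rate float64) time.Duration {
	if current < 0 || rate < 0.001 {
		return 0
	}
	return time.Duration(float64(target-current) / rate * float64(time.Second))
}

func main() {
	// 1000 target blocks, 400 synced, 20 blocks/s: 600/20 = 30s remaining.
	fmt.Println(remaining(1000, 400, 20))
}
```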
@@ -215,29 +215,6 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
	rts.network.Nodes[rts.nodes[1]].PeerManager.Disconnected(rts.nodes[0])
}

func TestReactor_SyncTime(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)

	genDoc, privVals := factory.RandGenesisDoc(config, 1, false, 30)
	maxBlockHeight := int64(101)

	rts := setup(t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
	require.Equal(t, maxBlockHeight, rts.reactors[rts.nodes[0]].store.Height())
	rts.start(t)

	require.Eventually(
		t,
		func() bool {
			return rts.reactors[rts.nodes[1]].GetRemainingSyncTime() > time.Nanosecond &&
				rts.reactors[rts.nodes[1]].pool.getLastSyncRate() > 0.001
		},
		10*time.Second,
		10*time.Millisecond,
		"expected node to be partially synced",
	)
}

func TestReactor_NoBlockResponse(t *testing.T) {
	config := cfg.ResetTestRoot("blockchain_reactor_test")
	defer os.RemoveAll(config.RootDir)
@@ -13,7 +13,6 @@ import (
	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
	"github.com/tendermint/tendermint/internal/p2p"
	"github.com/tendermint/tendermint/libs/log"
	"github.com/tendermint/tendermint/libs/sync"
	bcproto "github.com/tendermint/tendermint/proto/tendermint/blockchain"
	"github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/types"

@@ -35,8 +34,8 @@ type blockStore interface {
type BlockchainReactor struct {
	p2p.BaseReactor

	fastSync    *sync.AtomicBool // enable fast sync on start when it's been Set
	stateSynced bool             // set to true when SwitchToFastSync is called by state sync
	fastSync    bool // if true, enable fast sync on start
	stateSynced bool // set to true when SwitchToFastSync is called by state sync
	scheduler   *Routine
	processor   *Routine
	logger      log.Logger

@@ -49,10 +48,6 @@ type BlockchainReactor struct {
	reporter behavior.Reporter
	io       iIO
	store    blockStore

	syncStartTime   time.Time
	syncStartHeight int64
	lastSyncRate    float64 // blocks synced per second, based on the last 100 blocks
}

type blockApplier interface {

@@ -73,15 +68,12 @@ func newReactor(state state.State, store blockStore, reporter behavior.Reporter,
	processor := newPcState(pContext)

	return &BlockchainReactor{
		scheduler:       newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor:       newRoutine("processor", processor.handle, chBufferSize),
		store:           store,
		reporter:        reporter,
		logger:          log.NewNopLogger(),
		fastSync:        sync.NewBool(fastSync),
		syncStartHeight: initHeight,
		syncStartTime:   time.Time{},
		lastSyncRate:    0,
		scheduler: newRoutine("scheduler", scheduler.handle, chBufferSize),
		processor: newRoutine("processor", processor.handle, chBufferSize),
		store:     store,
		reporter:  reporter,
		logger:    log.NewNopLogger(),
		fastSync:  fastSync,
	}
}

@@ -137,7 +129,7 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) {
// Start implements cmn.Service interface
func (r *BlockchainReactor) Start() error {
	r.reporter = behavior.NewSwitchReporter(r.BaseReactor.Switch)
	if r.fastSync.IsSet() {
	if r.fastSync {
		err := r.startSync(nil)
		if err != nil {
			return fmt.Errorf("failed to start fast sync: %w", err)

@@ -183,13 +175,7 @@ func (r *BlockchainReactor) endSync() {
func (r *BlockchainReactor) SwitchToFastSync(state state.State) error {
	r.stateSynced = true
	state = state.Copy()

	err := r.startSync(&state)
	if err == nil {
		r.syncStartTime = time.Now()
	}

	return err
	return r.startSync(&state)
}

// reactor generated ticker events:

@@ -297,6 +283,7 @@ func (e bcResetState) String() string {

// Takes the channel as a parameter to avoid race conditions on r.events.
func (r *BlockchainReactor) demux(events <-chan Event) {
	var lastRate = 0.0
	var lastHundred = time.Now()

	var (

@@ -427,15 +414,10 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
		switch event := event.(type) {
		case pcBlockProcessed:
			r.setSyncHeight(event.height)
			if (r.syncHeight-r.syncStartHeight)%100 == 0 {
				newSyncRate := 100 / time.Since(lastHundred).Seconds()
				if r.lastSyncRate == 0 {
					r.lastSyncRate = newSyncRate
				} else {
					r.lastSyncRate = 0.9*r.lastSyncRate + 0.1*newSyncRate
				}
			if r.syncHeight%100 == 0 {
				lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds())
				r.logger.Info("Fast Sync Rate", "height", r.syncHeight,
					"max_peer_height", r.maxPeerHeight, "blocks/s", r.lastSyncRate)
					"max_peer_height", r.maxPeerHeight, "blocks/s", lastRate)
				lastHundred = time.Now()
			}
			r.scheduler.send(event)

@@ -447,7 +429,6 @@ func (r *BlockchainReactor) demux(events <-chan Event) {
				r.logger.Error("Failed to switch to consensus reactor")
			}
			r.endSync()
			r.fastSync.UnSet()
			return
		case noOpEvent:
		default:

@@ -615,29 +596,3 @@ func (r *BlockchainReactor) GetMaxPeerBlockHeight() int64 {
	defer r.mtx.RUnlock()
	return r.maxPeerHeight
}

func (r *BlockchainReactor) GetTotalSyncedTime() time.Duration {
	if !r.fastSync.IsSet() || r.syncStartTime.IsZero() {
		return time.Duration(0)
	}
	return time.Since(r.syncStartTime)
}

func (r *BlockchainReactor) GetRemainingSyncTime() time.Duration {
	if !r.fastSync.IsSet() {
		return time.Duration(0)
	}

	r.mtx.RLock()
	defer r.mtx.RUnlock()

	targetSyncs := r.maxPeerHeight - r.syncStartHeight
	currentSyncs := r.syncHeight - r.syncStartHeight + 1
	if currentSyncs < 0 || r.lastSyncRate < 0.001 {
		return time.Duration(0)
	}

	remain := float64(targetSyncs-currentSyncs) / r.lastSyncRate

	return time.Duration(int64(remain * float64(time.Second)))
}
@@ -161,8 +161,8 @@ func TestMempoolRmBadTx(t *testing.T) {
	txBytes := make([]byte, 8)
	binary.BigEndian.PutUint64(txBytes, uint64(0))

	resDeliver := app.DeliverTx(abci.RequestDeliverTx{Tx: txBytes})
	assert.False(t, resDeliver.IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))
	resDeliver := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	assert.False(t, resDeliver.Txs[0].IsErr(), fmt.Sprintf("expected no error. got %v", resDeliver))

	resCommit := app.Commit()
	assert.True(t, len(resCommit.Data) > 0)

@@ -233,15 +233,16 @@ func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
	return abci.ResponseInfo{Data: fmt.Sprintf("txs:%v", app.txCount)}
}

func (app *CounterApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	txValue := txAsUint64(req.Tx)
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	txValue := txAsUint64(req.Txs[0])
	if txValue != uint64(app.txCount) {
		return abci.ResponseDeliverTx{
		return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{
			Code: code.CodeTypeBadNonce,
			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}
			Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)}},
		}
	}
	app.txCount++
	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
	return abci.ResponseFinalizeBlock{Txs: []*abci.ResponseDeliverTx{{Code: code.CodeTypeOK}}}
}
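A side note on the hunk above: the new `FinalizeBlock` inspects only `req.Txs[0]`, which suffices for these single-tx tests but would ignore later transactions in a fuller block. A generalization that walks every transaction could look like the sketch below; it is illustrative, not part of this diff, and reuses `txAsUint64` and the `code` constants from the same test file.

```go
func (app *CounterApplication) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	// One result per transaction, checking the nonce sequence across the block.
	results := make([]*abci.ResponseDeliverTx, len(req.Txs))
	for i, tx := range req.Txs {
		txValue := txAsUint64(tx)
		if txValue != uint64(app.txCount) {
			results[i] = &abci.ResponseDeliverTx{
				Code: code.CodeTypeBadNonce,
				Log:  fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue),
			}
			continue
		}
		app.txCount++
		results[i] = &abci.ResponseDeliverTx{Code: code.CodeTypeOK}
	}
	return abci.ResponseFinalizeBlock{Txs: results}
}
```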
func (app *CounterApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {

@@ -2,7 +2,6 @@ package consensus

import (
	"fmt"
	"runtime/debug"
	"time"

	cstypes "github.com/tendermint/tendermint/internal/consensus/types"

@@ -102,14 +101,6 @@ type FastSyncReactor interface {
	SwitchToFastSync(sm.State) error

	GetMaxPeerBlockHeight() int64

	// GetTotalSyncedTime returns the time elapsed since fast sync started.
	GetTotalSyncedTime() time.Duration

	// GetRemainingSyncTime returns the estimated time until the node is fully
	// synced; it returns 0 if fast sync is not running or if the number of
	// blocks synced is too small (fewer than 100).
	GetRemainingSyncTime() time.Duration
}

// Reactor defines a reactor for the consensus service.

@@ -1206,24 +1197,21 @@ func (r *Reactor) handleVoteSetBitsMessage(envelope p2p.Envelope, msgI Message)
// It will handle errors and any possible panics gracefully. A caller can handle
// any error returned by sending a PeerError on the respective channel.
//
// NOTE: We process these messages even when we're fast_syncing. Messages affect
// either a peer state or the consensus state. Peer state updates can happen in
// parallel, but processing of proposals, block parts, and votes are ordered by
// the p2p channel.
//
// NOTE: We block on consensus state for proposals, block parts, and votes.
func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from processing message panic", "err", err)
		}
	}()

	// Just skip the entire message during syncing so that we can
	// process fewer messages.
	if r.WaitSync() {
		return
	}

	// We wrap the envelope's message in a Proto wire type so we can convert back
	// the domain type that individual channel message handlers can work with. We
	// do this here once to avoid having to do it for each individual message type.
@@ -73,20 +73,15 @@ type mockProxyApp struct {
	abciResponses *tmstate.ABCIResponses
}

func (mock *mockProxyApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	r := mock.abciResponses.DeliverTxs[mock.txCount]
func (mock *mockProxyApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
	r := mock.abciResponses.FinalizeBlock
	mock.txCount++
	if r == nil {
		return abci.ResponseDeliverTx{}
		return abci.ResponseFinalizeBlock{}
	}
	return *r
}

func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
	mock.txCount = 0
	return *mock.abciResponses.EndBlock
}

func (mock *mockProxyApp) Commit() abci.ResponseCommit {
	return abci.ResponseCommit{Data: mock.appHash}
}

@@ -619,8 +619,8 @@ func TestMockProxyApp(t *testing.T) {

	assert.NotPanics(t, func() {
		abciResWithEmptyDeliverTx := new(tmstate.ABCIResponses)
		abciResWithEmptyDeliverTx.DeliverTxs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.DeliverTxs = append(abciResWithEmptyDeliverTx.DeliverTxs, &abci.ResponseDeliverTx{})
		abciResWithEmptyDeliverTx.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, 0)
		abciResWithEmptyDeliverTx.FinalizeBlock.Txs = append(abciResWithEmptyDeliverTx.FinalizeBlock.Txs, &abci.ResponseDeliverTx{})

		// called when saveABCIResponses:
		bytes, err := proto.Marshal(abciResWithEmptyDeliverTx)

@@ -634,28 +634,30 @@ func TestMockProxyApp(t *testing.T) {
		mock := newMockProxyApp([]byte("mock_hash"), loadedAbciRes)

		abciRes := new(tmstate.ABCIResponses)
		abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
		abciRes.FinalizeBlock.Txs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.FinalizeBlock.Txs))
		// Execute transactions and get hash.
		proxyCb := func(req *abci.Request, res *abci.Response) {
			if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
				// TODO: make use of res.Log
				// TODO: make use of this info
				// Blocks may include invalid txs.
				txRes := r.DeliverTx
				if txRes.Code == abci.CodeTypeOK {
					validTxs++
				} else {
					logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
					invalidTxs++
			if r, ok := res.Value.(*abci.Response_FinalizeBlock); ok {
				for i, tx := range r.FinalizeBlock.Txs {
					// TODO: make use of res.Log
					// TODO: make use of this info
					// Blocks may include invalid txs.
					txRes := tx
					if txRes.Code == abci.CodeTypeOK {
						validTxs++
					} else {
						logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
						invalidTxs++
					}
					abciRes.FinalizeBlock.Txs[i] = txRes
					txIndex++
				}
				abciRes.DeliverTxs[txIndex] = txRes
				txIndex++
			}
		}
		mock.SetResponseCallback(proxyCb)

		someTx := []byte("tx")
		_, err = mock.DeliverTxAsync(context.Background(), abci.RequestDeliverTx{Tx: someTx})
		_, err = mock.FinalizeBlockSync(context.Background(), abci.RequestFinalizeBlock{Txs: [][]byte{someTx}})
		assert.NoError(t, err)
	})
	assert.True(t, validTxs == 1)
@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks
@@ -2,7 +2,6 @@ package evidence

import (
	"fmt"
	"runtime/debug"
	"sync"
	"time"

@@ -166,11 +165,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -302,11 +296,7 @@ func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Clos
		r.peerWG.Done()

		if e := recover(); e != nil {
			r.Logger.Error(
				"recovering from broadcasting evidence loop",
				"err", e,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from broadcasting evidence loop", "err", e)
		}
	}()
@@ -146,7 +146,7 @@ func (g *Group) OnStart() error {
func (g *Group) OnStop() {
	g.ticker.Stop()
	if err := g.FlushAndSync(); err != nil {
		g.Logger.Error("Error flushing to disk", "err", err)
		g.Logger.Error("Error flushin to disk", "err", err)
	}
}

@@ -160,7 +160,7 @@ func (g *Group) Wait() {
// Close closes the head file. The group must be stopped by this moment.
func (g *Group) Close() {
	if err := g.FlushAndSync(); err != nil {
		g.Logger.Error("Error flushing to disk", "err", err)
		g.Logger.Error("Error flushin to disk", "err", err)
	}

	g.mtx.Lock()
@@ -236,8 +236,7 @@ func TestMempool_KeepInvalidTxsInCache(t *testing.T) {
		require.NoError(t, err)

		// simulate new block
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: a})
		_ = app.DeliverTx(abci.RequestDeliverTx{Tx: b})
		_ = app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{a, b}})
		err = mp.Update(1, []types.Tx{a, b},
			[]*abci.ResponseDeliverTx{{Code: abci.CodeTypeOK}, {Code: 2}}, nil, nil)
		require.NoError(t, err)

@@ -363,24 +362,29 @@ func TestSerialReap(t *testing.T) {
	commitRange := func(start, end int) {
		ctx := context.Background()
		// Deliver some txs.
		var txs = make([][]byte, end)
		for i := start; i < end; i++ {
			txBytes := make([]byte, 8)
			binary.BigEndian.PutUint64(txBytes, uint64(i))
			res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
			if err != nil {
				t.Errorf("client error committing tx: %v", err)
			}
			if res.IsErr() {
			txs[i] = txBytes
		}
		res, err := appConnCon.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{Txs: txs})
		if err != nil {
			t.Errorf("client error committing tx: %v", err)
		}
		for _, tx := range res.Txs {
			if tx.IsErr() {
				t.Errorf("error committing tx. Code:%v result:%X log:%v",
					res.Code, res.Data, res.Log)
					tx.Code, tx.Data, tx.Log)
			}
		}
		res, err := appConnCon.CommitSync(ctx)

		resCommit, err := appConnCon.CommitSync(ctx)
		if err != nil {
			t.Errorf("client error committing: %v", err)
		}
		if len(res.Data) != 8 {
			t.Errorf("error committing. Hash:%X", res.Data)
		if len(resCommit.Data) != 8 {
			t.Errorf("error committing. Hash:%X", resCommit.Data)
		}
	}

@@ -405,7 +409,7 @@ func TestSerialReap(t *testing.T) {
	// Reap again. We should get the same amount
	reapCheck(1000)

	// Commit from the conensus AppConn
	// Commit from the consensus AppConn
	commitRange(0, 500)
	updateRange(0, 500)

@@ -530,9 +534,11 @@ func TestMempoolTxsBytes(t *testing.T) {
		}
	})
	ctx := context.Background()
	res, err := appConnCon.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
	res, err := appConnCon.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{Txs: [][]byte{txBytes}})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	for _, tx := range res.Txs {
		require.EqualValues(t, 0, tx.Code)
	}
	res2, err := appConnCon.CommitSync(ctx)
	require.NoError(t, err)
	require.NotEmpty(t, res2.Data)
@@ -4,7 +4,6 @@ import (
	"context"
	"errors"
	"fmt"
	"runtime/debug"
	"sync"
	"time"

@@ -189,11 +188,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -318,11 +312,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
		r.peerWG.Done()

		if e := recover(); e != nil {
			r.Logger.Error(
				"recovering from broadcasting mempool loop",
				"err", e,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
		}
	}()

@@ -4,7 +4,6 @@ import (
	"context"
	"errors"
	"fmt"
	"runtime/debug"
	"sync"
	"time"

@@ -188,11 +187,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -317,11 +311,7 @@ func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer)
		r.peerWG.Done()

		if e := recover(); e != nil {
			r.Logger.Error(
				"recovering from broadcasting mempool loop",
				"err", e,
				"stack", string(debug.Stack()),
			)
			r.Logger.Error("recovering from broadcasting mempool loop", "err", e)
		}
	}()

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks
@@ -3,7 +3,6 @@ package pex
import (
	"context"
	"fmt"
	"runtime/debug"
	"sync"
	"time"

@@ -368,11 +367,6 @@ func (r *ReactorV2) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (er
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -476,6 +476,8 @@ func (r *Router) routeChannel(
			}

			if !contains {
				r.logger.Error("tried to send message across a channel that the peer doesn't have available",
					"peer", envelope.To, "channel", chID)
				continue
			}
@@ -57,7 +57,7 @@ func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.Light

	// fetch the next peer id in the list and request a light block from that
	// peer
	peer := d.availablePeers.Pop(ctx)
	peer := d.availablePeers.Pop()
	lb, err := d.lightBlock(ctx, height, peer)
	return lb, peer, err
}

@@ -272,7 +272,7 @@ func (l *peerlist) Len() int {
	return len(l.peers)
}

func (l *peerlist) Pop(ctx context.Context) types.NodeID {
func (l *peerlist) Pop() types.NodeID {
	l.mtx.Lock()
	if len(l.peers) == 0 {
		// if we don't have any peers in the list we block until a peer is

@@ -281,13 +281,8 @@ func (l *peerlist) Pop(ctx context.Context) types.NodeID {
		l.waiting = append(l.waiting, wait)
		// unlock whilst waiting so that the list can be appended to
		l.mtx.Unlock()
		select {
		case peer := <-wait:
			return peer

		case <-ctx.Done():
			return ""
		}
		peer := <-wait
		return peer
	}

	peer := l.peers[0]

@@ -95,7 +95,7 @@ func TestPeerListBasic(t *testing.T) {

	half := numPeers / 2
	for i := 0; i < half; i++ {
		assert.Equal(t, peerSet[i], peerList.Pop(ctx))
		assert.Equal(t, peerSet[i], peerList.Pop())
	}
	assert.Equal(t, half, peerList.Len())

@@ -104,7 +104,7 @@ func TestPeerListBasic(t *testing.T) {

	peerList.Remove(peerSet[half])
	half++
	assert.Equal(t, peerSet[half], peerList.Pop(ctx))
	assert.Equal(t, peerSet[half], peerList.Pop())

}

@@ -117,7 +117,7 @@ func TestPeerListConcurrent(t *testing.T) {
	// peer list hasn't been populated, so each of these goroutines should block
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop(ctx)
			_ = peerList.Pop()
			wg.Done()
		}()
	}

@@ -132,7 +132,7 @@ func TestPeerListConcurrent(t *testing.T) {
	// we request the second half of the peer set
	for i := 0; i < numPeers/2; i++ {
		go func() {
			_ = peerList.Pop(ctx)
			_ = peerList.Pop()
			wg.Done()
		}()
	}
@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks
@@ -6,7 +6,6 @@ import (
	"errors"
	"fmt"
	"reflect"
	"runtime/debug"
	"sort"
	"time"

@@ -311,17 +310,12 @@ func (r *Reactor) backfill(
	// waiting on blocks. If it takes 4s to retrieve a block and 1s to verify
	// it, then steady state involves four workers.
	for i := 0; i < int(r.cfg.Fetchers); i++ {
		ctxWithCancel, cancel := context.WithCancel(ctx)
		defer cancel()
		go func() {
			for {
				select {
				case height := <-queue.nextHeight():
					r.Logger.Debug("fetching next block", "height", height)
					lb, peer, err := r.dispatcher.LightBlock(ctxWithCancel, height)
					if errors.Is(err, context.Canceled) {
						return
					}
					lb, peer, err := r.dispatcher.LightBlock(ctx, height)
					if err != nil {
						queue.retry(height)
						if errors.Is(err, errNoConnectedPeers) {

@@ -645,11 +639,6 @@ func (r *Reactor) handleMessage(chID p2p.ChannelID, envelope p2p.Envelope) (err
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("panic in processing message: %v", e)
			r.Logger.Error(
				"recovering from processing message panic",
				"err", err,
				"stack", string(debug.Stack()),
			)
		}
	}()

@@ -3,11 +3,12 @@ package statesync

import (
	"context"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/fortytw2/leaktest"
	// "github.com/fortytw2/leaktest"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	dbm "github.com/tendermint/tm-db"

@@ -420,11 +421,11 @@ func TestReactor_Dispatcher(t *testing.T) {

func TestReactor_Backfill(t *testing.T) {
	// test backfill algorithm with varying failure rates [0, 10]
	failureRates := []int{0, 2, 9}
	failureRates := []int{0, 3, 9}
	for _, failureRate := range failureRates {
		failureRate := failureRate
		t.Run(fmt.Sprintf("failure rate: %d", failureRate), func(t *testing.T) {
			t.Cleanup(leaktest.CheckTimeout(t, 1*time.Minute))
			// t.Cleanup(leaktest.Check(t))
			rts := setup(t, nil, nil, nil, 21)

			var (

@@ -466,7 +467,7 @@ func TestReactor_Backfill(t *testing.T) {
				factory.MakeBlockIDWithHash(chain[startHeight].Header.Hash()),
				stopTime,
			)
			if failureRate > 3 {
			if failureRate > 5 {
				require.Error(t, err)
			} else {
				require.NoError(t, err)

@@ -505,7 +506,6 @@ func handleLightBlockRequests(t *testing.T,
	close chan struct{},
	failureRate int) {
	requests := 0
	errorCount := 0
	for {
		select {
		case envelope := <-receiving:

@@ -520,7 +520,7 @@ func handleLightBlockRequests(t *testing.T,
					},
				}
			} else {
				switch errorCount % 3 {
				switch rand.Intn(3) {
				case 0: // send a different block
					differntLB, err := mockLB(t, int64(msg.Height), factory.DefaultTestTime, factory.MakeBlockID()).ToProto()
					require.NoError(t, err)

@@ -539,7 +539,6 @@ func handleLightBlockRequests(t *testing.T,
				}
				case 2: // don't do anything
				}
				errorCount++
			}
		}
		case <-close:

@@ -80,7 +80,7 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool {
// snapshot height is verified using the light client, and the expected app hash
// is set for the snapshot.
func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) {
	ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	appHash, err := p.stateProvider.AppHash(ctx, snapshot.Height)

@@ -1,4 +1,3 @@
// nolint
package query

import (
@@ -1,33 +0,0 @@
package sync

import "sync/atomic"

// AtomicBool is an atomic Boolean.
// Its methods are all atomic, thus safe to be called by multiple goroutines simultaneously.
// Note: When embedding into a struct one should always use *AtomicBool to avoid copy.
// It's a simple implementation from https://github.com/tevino/abool
type AtomicBool int32

// NewBool creates an AtomicBool with given default value.
func NewBool(ok bool) *AtomicBool {
	ab := new(AtomicBool)
	if ok {
		ab.Set()
	}
	return ab
}

// Set sets the Boolean to true.
func (ab *AtomicBool) Set() {
	atomic.StoreInt32((*int32)(ab), 1)
}

// UnSet sets the Boolean to false.
func (ab *AtomicBool) UnSet() {
	atomic.StoreInt32((*int32)(ab), 0)
}

// IsSet returns whether the Boolean is true.
func (ab *AtomicBool) IsSet() bool {
	return atomic.LoadInt32((*int32)(ab))&1 == 1
}
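This deletion, together with its test file below, rolls the reactors back to plain `bool` fields. Where a lock-free flag is still wanted without the helper package, the standard library suffices; a minimal sketch, not code from this diff:

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// flag reproduces the deleted helper with sync/atomic directly; the name
// is illustrative. As with AtomicBool, share a pointer to avoid copies.
type flag struct{ v int32 }

func (f *flag) Set()        { atomic.StoreInt32(&f.v, 1) }
func (f *flag) UnSet()      { atomic.StoreInt32(&f.v, 0) }
func (f *flag) IsSet() bool { return atomic.LoadInt32(&f.v) == 1 }

func main() {
	var f flag
	f.Set()
	fmt.Println(f.IsSet()) // true
}
```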
@@ -1,27 +0,0 @@
package sync

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestDefaultValue(t *testing.T) {
	t.Parallel()
	v := NewBool(false)
	assert.False(t, v.IsSet())

	v = NewBool(true)
	assert.True(t, v.IsSet())
}

func TestSetUnSet(t *testing.T) {
	t.Parallel()
	v := NewBool(false)

	v.Set()
	assert.True(t, v.IsSet())

	v.UnSet()
	assert.False(t, v.IsSet())
}
@@ -407,9 +407,9 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
		return nil, err
	}

	// proto-encode BeginBlock events
	bbeBytes, err := proto.Marshal(&abci.ResponseBeginBlock{
		Events: res.BeginBlockEvents,
	// proto-encode FinalizeBlock events
	bbeBytes, err := proto.Marshal(&abci.ResponseFinalizeBlock{
		Events: res.FinalizeBlockEvents,
	})
	if err != nil {
		return nil, err

@@ -418,16 +418,8 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul
	// Build a Merkle tree of proto-encoded DeliverTx results and get a hash.
	results := types.NewResults(res.TxsResults)

	// proto-encode EndBlock events.
	ebeBytes, err := proto.Marshal(&abci.ResponseEndBlock{
		Events: res.EndBlockEvents,
	})
	if err != nil {
		return nil, err
	}

	// Build a Merkle tree out of the above 3 binary slices.
	rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash(), ebeBytes})
	rH := merkle.HashFromByteSlices([][]byte{bbeBytes, results.Hash()})

	// Verify block results.
	if !bytes.Equal(rH, trustedBlock.LastResultsHash) {
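The hunk above changes what `LastResultsHash` commits to: with EndBlock gone, the Merkle tree is built from two leaves instead of three. Below is a standalone sketch of the new layout, where the parameter names are stand-ins for the values computed in `BlockResults`:

```go
package example

import (
	"github.com/tendermint/tendermint/crypto/merkle"
)

// lastResultsHash reproduces the two-leaf layout above: the proto-encoded
// FinalizeBlock events followed by the root of the per-tx results tree.
func lastResultsHash(finalizeEventsBytes, txResultsHash []byte) []byte {
	return merkle.HashFromByteSlices([][]byte{finalizeEventsBytes, txResultsHash})
}
```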
@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks
@@ -81,14 +81,13 @@ func createAndStartIndexerService(

	eventSinks := []indexer.EventSink{}

	// check for duplicated sinks
	// Check duplicated sinks.
	sinks := map[string]bool{}
	for _, s := range config.TxIndex.Indexer {
		sl := strings.ToLower(s)
		if sinks[sl] {
			return nil, nil, errors.New("found duplicated sinks, please check the tx-index section in the config.toml")
		}

		sinks[sl] = true
	}

@@ -96,31 +95,25 @@ loop:
	for k := range sinks {
		switch k {
		case string(indexer.NULL):
			// When we see null in the config, the eventsinks will be reset with the
			// nullEventSink.
			// when we see null in the config, the eventsinks will be reset with the nullEventSink.
			eventSinks = []indexer.EventSink{null.NewEventSink()}
			break loop

		case string(indexer.KV):
			store, err := dbProvider(&cfg.DBContext{ID: "tx_index", Config: config})
			if err != nil {
				return nil, nil, err
			}

			eventSinks = append(eventSinks, kv.NewEventSink(store))

		case string(indexer.PSQL):
			conn := config.TxIndex.PsqlConn
			if conn == "" {
				return nil, nil, errors.New("the psql connection settings cannot be empty")
			}

			es, _, err := psql.NewEventSink(conn, chainID)
			if err != nil {
				return nil, nil, err
			}
			eventSinks = append(eventSinks, es)

		default:
			return nil, nil, errors.New("unsupported event sink type")
		}
@@ -21,20 +21,21 @@ import "gogoproto/gogo.proto";

message Request {
  oneof value {
    RequestEcho       echo        = 1;
    RequestFlush      flush       = 2;
    RequestInfo       info        = 3;
    RequestInitChain  init_chain  = 4;
    RequestQuery      query       = 5;
    RequestBeginBlock begin_block = 6;
    RequestCheckTx    check_tx    = 7;
    RequestDeliverTx  deliver_tx  = 8;
    RequestEndBlock   end_block   = 9;
    RequestEcho      echo       = 1;
    RequestFlush     flush      = 2;
    RequestInfo      info       = 3;
    RequestInitChain init_chain = 4;
    RequestQuery     query      = 5;
    // RequestBeginBlock begin_block = 6;
    RequestCheckTx check_tx = 7;
    // RequestDeliverTx deliver_tx = 8;
    // RequestEndBlock end_block = 9;
    RequestCommit             commit               = 10;
    RequestListSnapshots      list_snapshots       = 11;
    RequestOfferSnapshot      offer_snapshot       = 12;
    RequestLoadSnapshotChunk  load_snapshot_chunk  = 13;
    RequestApplySnapshotChunk apply_snapshot_chunk = 14;
    RequestFinalizeBlock      finalize_block       = 15;
  }
}

@@ -67,13 +68,6 @@ message RequestQuery {
  bool prove = 4;
}

message RequestBeginBlock {
  bytes hash = 1;
  tendermint.types.Header header = 2 [(gogoproto.nullable) = false];
  LastCommitInfo last_commit_info = 3 [(gogoproto.nullable) = false];
  repeated Evidence byzantine_validators = 4 [(gogoproto.nullable) = false];
}

enum CheckTxType {
  NEW     = 0 [(gogoproto.enumvalue_customname) = "New"];
  RECHECK = 1 [(gogoproto.enumvalue_customname) = "Recheck"];

@@ -84,14 +78,6 @@ message RequestCheckTx {
  CheckTxType type = 2;
}

message RequestDeliverTx {
  bytes tx = 1;
}

message RequestEndBlock {
  int64 height = 1;
}

message RequestCommit {}

// lists available snapshots

@@ -117,26 +103,35 @@ message RequestApplySnapshotChunk {
  string sender = 3;
}

message RequestFinalizeBlock {
  repeated bytes txs = 1;
  bytes hash = 2;
  tendermint.types.Header header = 3 [(gogoproto.nullable) = false];
  LastCommitInfo last_commit_info = 4 [(gogoproto.nullable) = false];
  repeated Evidence byzantine_validators = 5 [(gogoproto.nullable) = false];
}
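The new request message above merges what `RequestBeginBlock` and the per-tx `RequestDeliverTx` carried. A sketch of assembling it from the generated Go types follows; the function and parameters are illustrative stand-ins for what the block executor already holds.

```go
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// buildFinalizeBlockRequest shows how the old BeginBlock + DeliverTx inputs
// fold into the single new message. Fields 3-5 of the proto above (header,
// last commit info, byzantine validators) would be populated the same way
// RequestBeginBlock's were.
func buildFinalizeBlockRequest(hash []byte, txs [][]byte) abci.RequestFinalizeBlock {
	return abci.RequestFinalizeBlock{
		Hash: hash,
		Txs:  txs,
	}
}
```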
//----------------------------------------
|
||||
// Response types
|
||||
|
||||
message Response {
|
||||
oneof value {
|
||||
ResponseException exception = 1;
|
||||
ResponseEcho echo = 2;
|
||||
ResponseFlush flush = 3;
|
||||
ResponseInfo info = 4;
|
||||
ResponseInitChain init_chain = 5;
|
||||
ResponseQuery query = 6;
|
||||
ResponseBeginBlock begin_block = 7;
|
||||
ResponseCheckTx check_tx = 8;
|
||||
ResponseDeliverTx deliver_tx = 9;
|
||||
ResponseEndBlock end_block = 10;
|
||||
ResponseException exception = 1;
|
||||
ResponseEcho echo = 2;
|
||||
ResponseFlush flush = 3;
|
||||
ResponseInfo info = 4;
|
||||
ResponseInitChain init_chain = 5;
|
||||
ResponseQuery query = 6;
|
||||
// ResponseBeginBlock begin_block = 7;
|
||||
ResponseCheckTx check_tx = 8;
|
||||
// ResponseDeliverTx deliver_tx = 9;
|
||||
// ResponseEndBlock end_block = 10;
|
||||
ResponseCommit commit = 11;
|
||||
ResponseListSnapshots list_snapshots = 12;
|
||||
ResponseOfferSnapshot offer_snapshot = 13;
|
||||
ResponseLoadSnapshotChunk load_snapshot_chunk = 14;
|
||||
ResponseApplySnapshotChunk apply_snapshot_chunk = 15;
|
||||
ResponseFinalizeBlock finalize_block = 16;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -181,10 +176,6 @@ message ResponseQuery {
|
||||
string codespace = 10;
|
||||
}
|
||||
|
||||
message ResponseBeginBlock {
|
||||
repeated Event events = 1 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
}
|
||||
|
||||
message ResponseCheckTx {
|
||||
uint32 code = 1;
|
||||
bytes data = 2;
|
||||
@@ -210,12 +201,6 @@ message ResponseDeliverTx {
|
||||
string codespace = 8;
|
||||
}
|
||||
|
||||
message ResponseEndBlock {
|
||||
repeated ValidatorUpdate validator_updates = 1 [(gogoproto.nullable) = false];
|
||||
tendermint.types.ConsensusParams consensus_param_updates = 2;
|
||||
repeated Event events = 3 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
}
|
||||
|
||||
message ResponseCommit {
|
||||
// reserve 1
|
||||
bytes data = 2;
|
||||
@@ -258,6 +243,13 @@ message ResponseApplySnapshotChunk {
|
||||
}
|
||||
}
|
||||
|
||||
message ResponseFinalizeBlock {
|
||||
repeated ResponseDeliverTx txs = 1;
|
||||
repeated ValidatorUpdate validator_updates = 2 [(gogoproto.nullable) = false];
|
||||
tendermint.types.ConsensusParams consensus_param_updates = 3;
|
||||
repeated Event events = 4 [(gogoproto.nullable) = false, (gogoproto.jsontag) = "events,omitempty"];
|
||||
}
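Reading the aggregated response against the messages it replaces: txs takes over from the per-transaction ResponseDeliverTx stream, validator_updates and consensus_param_updates come from ResponseEndBlock, and events absorbs both the begin- and end-block event streams. A hedged Go sketch of that mapping, where deliverTxs, valUpdates, beginEvents, and endEvents are hypothetical locals and abci is the usual import alias:

func aggregate(
    deliverTxs []*abci.ResponseDeliverTx,
    valUpdates []abci.ValidatorUpdate,
    beginEvents, endEvents []abci.Event,
) abci.ResponseFinalizeBlock {
    return abci.ResponseFinalizeBlock{
        Txs:              deliverTxs,                        // was one ResponseDeliverTx per tx
        ValidatorUpdates: valUpdates,                        // was ResponseEndBlock.ValidatorUpdates
        Events:           append(beginEvents, endEvents...), // was Begin/EndBlock events
    }
}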
//----------------------------------------
// Misc.

@@ -351,15 +343,13 @@ service ABCIApplication {
  rpc Echo(RequestEcho) returns (ResponseEcho);
  rpc Flush(RequestFlush) returns (ResponseFlush);
  rpc Info(RequestInfo) returns (ResponseInfo);
  rpc DeliverTx(RequestDeliverTx) returns (ResponseDeliverTx);
  rpc CheckTx(RequestCheckTx) returns (ResponseCheckTx);
  rpc Query(RequestQuery) returns (ResponseQuery);
  rpc Commit(RequestCommit) returns (ResponseCommit);
  rpc InitChain(RequestInitChain) returns (ResponseInitChain);
  rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock);
  rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock);
  rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots);
  rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot);
  rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk);
  rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk);
  rpc FinalizeBlock(RequestFinalizeBlock) returns (ResponseFinalizeBlock);
}
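For the gRPC flavor of ABCI, the generator emits a matching client. A sketch of driving the new method over a socket, assuming the generated ABCIApplicationClient in the abci types package; the address and the single raw tx are placeholders:

package main

import (
    "context"
    "fmt"
    "log"

    "google.golang.org/grpc"

    abci "github.com/tendermint/tendermint/abci/types"
)

func main() {
    conn, err := grpc.Dial("127.0.0.1:26658", grpc.WithInsecure())
    if err != nil {
        log.Fatal(err)
    }
    defer conn.Close()

    client := abci.NewABCIApplicationClient(conn)
    res, err := client.FinalizeBlock(context.Background(), &abci.RequestFinalizeBlock{
        Txs: [][]byte{[]byte("example-tx")},
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println("tx results:", len(res.Txs))
}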

@@ -34,9 +34,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
// of the various ABCI calls during block processing.
// It is persisted to disk for each height before calling Commit.
type ABCIResponses struct {
    DeliverTxs []*types.ResponseDeliverTx `protobuf:"bytes,1,rep,name=deliver_txs,json=deliverTxs,proto3" json:"deliver_txs,omitempty"`
    EndBlock *types.ResponseEndBlock `protobuf:"bytes,2,opt,name=end_block,json=endBlock,proto3" json:"end_block,omitempty"`
    BeginBlock *types.ResponseBeginBlock `protobuf:"bytes,3,opt,name=begin_block,json=beginBlock,proto3" json:"begin_block,omitempty"`
    FinalizeBlock *types.ResponseFinalizeBlock `protobuf:"bytes,2,opt,name=finalize_block,json=finalizeBlock,proto3" json:"finalize_block,omitempty"`
}

func (m *ABCIResponses) Reset() { *m = ABCIResponses{} }
@@ -72,23 +70,9 @@ func (m *ABCIResponses) XXX_DiscardUnknown() {

var xxx_messageInfo_ABCIResponses proto.InternalMessageInfo

func (m *ABCIResponses) GetDeliverTxs() []*types.ResponseDeliverTx {
func (m *ABCIResponses) GetFinalizeBlock() *types.ResponseFinalizeBlock {
    if m != nil {
        return m.DeliverTxs
    }
    return nil
}

func (m *ABCIResponses) GetEndBlock() *types.ResponseEndBlock {
    if m != nil {
        return m.EndBlock
    }
    return nil
}

func (m *ABCIResponses) GetBeginBlock() *types.ResponseBeginBlock {
    if m != nil {
        return m.BeginBlock
    return m.FinalizeBlock
    }
    return nil
}
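The regenerated getter keeps gogoproto's nil-receiver guard, which matters once responses persisted by older nodes load without a finalize_block field. A small sketch of why the chained getters are safe, with resp as a hypothetical loaded value:

package main

import (
    "fmt"

    tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
)

func main() {
    var resp *tmstate.ABCIResponses // may be nil, or carry a nil FinalizeBlock
    // Both generated getters tolerate nil receivers, so this yields an empty
    // slice instead of panicking.
    updates := resp.GetFinalizeBlock().GetValidatorUpdates()
    fmt.Println(len(updates)) // 0
}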

@@ -422,55 +406,52 @@ func init() {
func init() { proto.RegisterFile("tendermint/state/types.proto", fileDescriptor_ccfacf933f22bf93) }

var fileDescriptor_ccfacf933f22bf93 = []byte{
    // 763 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x6f, 0xd3, 0x30,
    0x14, 0x6e, 0xe8, 0xb6, 0xb6, 0xce, 0xda, 0x0e, 0x8f, 0x43, 0xd6, 0xb1, 0xb4, 0x2b, 0x3f, 0x34,
    0x71, 0x48, 0xa5, 0x71, 0x40, 0x5c, 0x26, 0x2d, 0x2d, 0x62, 0x95, 0x26, 0x04, 0xd9, 0xb4, 0x03,
    0x97, 0xc8, 0x6d, 0xbc, 0x24, 0xa2, 0x4d, 0xa2, 0xd8, 0x2d, 0xe3, 0x0f, 0xe0, 0xbe, 0x2b, 0xff,
    0xd1, 0x8e, 0x3b, 0x22, 0x0e, 0x03, 0xba, 0x7f, 0x04, 0xd9, 0xce, 0x0f, 0xb7, 0x65, 0xd2, 0x10,
    0x37, 0xfb, 0x7d, 0xdf, 0xfb, 0xfc, 0xf9, 0xf9, 0x3d, 0x19, 0x3c, 0xa6, 0x38, 0x70, 0x70, 0x3c,
    0xf6, 0x03, 0xda, 0x21, 0x14, 0x51, 0xdc, 0xa1, 0x5f, 0x22, 0x4c, 0x8c, 0x28, 0x0e, 0x69, 0x08,
    0x37, 0x72, 0xd4, 0xe0, 0x68, 0xe3, 0x91, 0x1b, 0xba, 0x21, 0x07, 0x3b, 0x6c, 0x25, 0x78, 0x8d,
    0x6d, 0x49, 0x05, 0x0d, 0x86, 0xbe, 0x2c, 0xd2, 0x90, 0x8f, 0xe0, 0xf1, 0x39, 0xb4, 0xb5, 0x84,
    0x4e, 0xd1, 0xc8, 0x77, 0x10, 0x0d, 0xe3, 0x84, 0xb1, 0xb3, 0xc4, 0x88, 0x50, 0x8c, 0xc6, 0xa9,
    0x80, 0x2e, 0xc1, 0x53, 0x1c, 0x13, 0x3f, 0x0c, 0xe6, 0x0e, 0x68, 0xba, 0x61, 0xe8, 0x8e, 0x70,
    0x87, 0xef, 0x06, 0x93, 0xf3, 0x0e, 0xf5, 0xc7, 0x98, 0x50, 0x34, 0x8e, 0x04, 0xa1, 0xfd, 0x43,
    0x01, 0xd5, 0x43, 0xb3, 0xdb, 0xb7, 0x30, 0x89, 0xc2, 0x80, 0x60, 0x02, 0xbb, 0x40, 0x75, 0xf0,
    0xc8, 0x9f, 0xe2, 0xd8, 0xa6, 0x17, 0x44, 0x53, 0x5a, 0xc5, 0x3d, 0x75, 0xbf, 0x6d, 0x48, 0xc5,
    0x60, 0x97, 0x34, 0xd2, 0x84, 0x9e, 0xe0, 0x9e, 0x5e, 0x58, 0xc0, 0x49, 0x97, 0x04, 0x1e, 0x80,
    0x0a, 0x0e, 0x1c, 0x7b, 0x30, 0x0a, 0x87, 0x9f, 0xb4, 0x07, 0x2d, 0x65, 0x4f, 0xdd, 0xdf, 0xbd,
    0x53, 0xe2, 0x4d, 0xe0, 0x98, 0x8c, 0x68, 0x95, 0x71, 0xb2, 0x82, 0x3d, 0xa0, 0x0e, 0xb0, 0xeb,
    0x07, 0x89, 0x42, 0x91, 0x2b, 0x3c, 0xb9, 0x53, 0xc1, 0x64, 0x5c, 0xa1, 0x01, 0x06, 0xd9, 0xba,
    0xfd, 0x55, 0x01, 0xb5, 0xb3, 0xb4, 0xa0, 0xa4, 0x1f, 0x9c, 0x87, 0xb0, 0x0b, 0xaa, 0x59, 0x89,
    0x6d, 0x82, 0xa9, 0xa6, 0x70, 0x69, 0x5d, 0x96, 0x16, 0x05, 0xcc, 0x12, 0x4f, 0x30, 0xb5, 0xd6,
    0xa7, 0xd2, 0x0e, 0x1a, 0x60, 0x73, 0x84, 0x08, 0xb5, 0x3d, 0xec, 0xbb, 0x1e, 0xb5, 0x87, 0x1e,
    0x0a, 0x5c, 0xec, 0xf0, 0x7b, 0x16, 0xad, 0x87, 0x0c, 0x3a, 0xe2, 0x48, 0x57, 0x00, 0xed, 0x6f,
    0x0a, 0xd8, 0xec, 0x32, 0x9f, 0x01, 0x99, 0x90, 0xf7, 0xfc, 0xfd, 0xb8, 0x19, 0x0b, 0x6c, 0x0c,
    0xd3, 0xb0, 0x2d, 0xde, 0x35, 0xf1, 0xb3, 0xbb, 0xec, 0x67, 0x41, 0xc0, 0x5c, 0xb9, 0xba, 0x69,
    0x16, 0xac, 0xfa, 0x70, 0x3e, 0xfc, 0xcf, 0xde, 0x3c, 0x50, 0x3a, 0x13, 0x8d, 0x03, 0x0f, 0x41,
    0x25, 0x53, 0x4b, 0x7c, 0xec, 0xc8, 0x3e, 0x92, 0x06, 0xcb, 0x9d, 0x24, 0x1e, 0xf2, 0x2c, 0xd8,
    0x00, 0x65, 0x12, 0x9e, 0xd3, 0xcf, 0x28, 0xc6, 0xfc, 0xc8, 0x8a, 0x95, 0xed, 0xdb, 0xbf, 0xd7,
    0xc0, 0xea, 0x09, 0x9b, 0x23, 0xf8, 0x1a, 0x94, 0x12, 0xad, 0xe4, 0x98, 0x2d, 0x63, 0x71, 0xd6,
    0x8c, 0xc4, 0x54, 0x72, 0x44, 0xca, 0x87, 0xcf, 0x41, 0x79, 0xe8, 0x21, 0x3f, 0xb0, 0x7d, 0x71,
    0xa7, 0x8a, 0xa9, 0xce, 0x6e, 0x9a, 0xa5, 0x2e, 0x8b, 0xf5, 0x7b, 0x56, 0x89, 0x83, 0x7d, 0x07,
    0x3e, 0x03, 0x35, 0x3f, 0xf0, 0xa9, 0x8f, 0x46, 0x49, 0x25, 0xb4, 0x1a, 0xaf, 0x40, 0x35, 0x89,
    0x8a, 0x22, 0xc0, 0x17, 0x80, 0x97, 0x44, 0xb4, 0x59, 0xca, 0x2c, 0x72, 0x66, 0x9d, 0x01, 0xbc,
    0x8f, 0x12, 0xae, 0x05, 0xaa, 0x12, 0xd7, 0x77, 0xb4, 0x95, 0x65, 0xef, 0xe2, 0xa9, 0x78, 0x56,
    0xbf, 0x67, 0x6e, 0x32, 0xef, 0xb3, 0x9b, 0xa6, 0x7a, 0x9c, 0x4a, 0xf5, 0x7b, 0x96, 0x9a, 0xe9,
    0xf6, 0x1d, 0x78, 0x0c, 0xea, 0x92, 0x26, 0x1b, 0x4e, 0x6d, 0x95, 0xab, 0x36, 0x0c, 0x31, 0xb9,
    0x46, 0x3a, 0xb9, 0xc6, 0x69, 0x3a, 0xb9, 0x66, 0x99, 0xc9, 0x5e, 0xfe, 0x6c, 0x2a, 0x56, 0x35,
    0xd3, 0x62, 0x28, 0x7c, 0x0b, 0xea, 0x01, 0xbe, 0xa0, 0x76, 0xd6, 0xac, 0x44, 0x5b, 0xbb, 0x57,
    0x7b, 0xd7, 0x58, 0x5a, 0x3e, 0x29, 0xf0, 0x00, 0x00, 0x49, 0xa3, 0x74, 0x2f, 0x0d, 0x29, 0x83,
    0x19, 0xe1, 0xd7, 0x92, 0x44, 0xca, 0xf7, 0x33, 0xc2, 0xd2, 0x24, 0x23, 0x5d, 0xa0, 0xcb, 0xdd,
    0x9c, 0xeb, 0x65, 0x8d, 0x5d, 0xe1, 0x8f, 0xb5, 0x9d, 0x37, 0x76, 0x9e, 0x9d, 0xb4, 0xf8, 0x5f,
    0xc7, 0x0c, 0xfc, 0xe7, 0x98, 0xbd, 0x03, 0x4f, 0xe7, 0xc6, 0x6c, 0x41, 0x3f, 0xb3, 0xa7, 0x72,
    0x7b, 0x2d, 0x69, 0xee, 0xe6, 0x85, 0x52, 0x8f, 0x69, 0x23, 0xc6, 0x98, 0x4c, 0x46, 0x94, 0xd8,
    0x1e, 0x22, 0x9e, 0xb6, 0xde, 0x52, 0xf6, 0xd6, 0x45, 0x23, 0x5a, 0x22, 0x7e, 0x84, 0x88, 0x07,
    0xb7, 0x40, 0x19, 0x45, 0x91, 0xa0, 0x54, 0x39, 0xa5, 0x84, 0xa2, 0x88, 0x41, 0xe6, 0x87, 0xab,
    0x99, 0xae, 0x5c, 0xcf, 0x74, 0xe5, 0xd7, 0x4c, 0x57, 0x2e, 0x6f, 0xf5, 0xc2, 0xf5, 0xad, 0x5e,
    0xf8, 0x7e, 0xab, 0x17, 0x3e, 0xbe, 0x72, 0x7d, 0xea, 0x4d, 0x06, 0xc6, 0x30, 0x1c, 0x77, 0xe4,
    0x3f, 0x25, 0x5f, 0x8a, 0x8f, 0x6d, 0xf1, 0x4b, 0x1c, 0xac, 0xf1, 0xf8, 0xcb, 0x3f, 0x01, 0x00,
    0x00, 0xff, 0xff, 0xa5, 0x17, 0xac, 0x23, 0x2d, 0x07, 0x00, 0x00,
    // 717 bytes of a gzipped FileDescriptorProto
    0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0x4d, 0x6f, 0xd3, 0x4a,
    0x14, 0x8d, 0x5f, 0x3f, 0x92, 0x4c, 0x9a, 0xa4, 0x6f, 0xfa, 0x16, 0x69, 0xfa, 0xea, 0xe4, 0x45,
    0x8f, 0xaa, 0x62, 0xe1, 0x48, 0xb0, 0x40, 0x6c, 0x90, 0x9a, 0x54, 0x50, 0x4b, 0x05, 0x81, 0x8b,
    0xba, 0x60, 0x81, 0x35, 0x71, 0x26, 0xf6, 0x08, 0xc7, 0xb6, 0x3c, 0x93, 0xf2, 0xb1, 0x67, 0xdf,
    0x2d, 0xff, 0xa8, 0xcb, 0x2e, 0x59, 0x15, 0x48, 0xff, 0x08, 0x9a, 0x0f, 0xdb, 0x93, 0x84, 0x45,
    0x11, 0xbb, 0xcc, 0x3d, 0xe7, 0x9e, 0x7b, 0xe6, 0xce, 0xbd, 0x31, 0xf8, 0x97, 0xe1, 0x68, 0x8c,
    0xd3, 0x29, 0x89, 0x58, 0x9f, 0x32, 0xc4, 0x70, 0x9f, 0x7d, 0x4c, 0x30, 0xb5, 0x92, 0x34, 0x66,
    0x31, 0xdc, 0x2e, 0x50, 0x4b, 0xa0, 0xed, 0x7f, 0xfc, 0xd8, 0x8f, 0x05, 0xd8, 0xe7, 0xbf, 0x24,
    0xaf, 0xbd, 0xa7, 0xa9, 0xa0, 0x91, 0x47, 0x74, 0x91, 0xb6, 0x5e, 0x42, 0xc4, 0x17, 0xd0, 0xee,
    0x0a, 0x7a, 0x81, 0x42, 0x32, 0x46, 0x2c, 0x4e, 0x15, 0x63, 0x7f, 0x85, 0x91, 0xa0, 0x14, 0x4d,
    0x33, 0x01, 0x53, 0x83, 0x2f, 0x70, 0x4a, 0x49, 0x1c, 0x2d, 0x14, 0xe8, 0xf8, 0x71, 0xec, 0x87,
    0xb8, 0x2f, 0x4e, 0xa3, 0xd9, 0xa4, 0xcf, 0xc8, 0x14, 0x53, 0x86, 0xa6, 0x89, 0x24, 0xf4, 0xde,
    0x82, 0xfa, 0xd1, 0x60, 0x68, 0x3b, 0x98, 0x26, 0x71, 0x44, 0x31, 0x85, 0xcf, 0x41, 0x63, 0x42,
    0x22, 0x14, 0x92, 0x4f, 0xd8, 0x1d, 0x85, 0xb1, 0xf7, 0xae, 0xf5, 0x57, 0xd7, 0x38, 0xac, 0x3d,
    0x38, 0xb0, 0xb4, 0x76, 0xf0, 0x6b, 0x5a, 0x59, 0xce, 0x53, 0x45, 0x1f, 0x70, 0xb6, 0x53, 0x9f,
    0xe8, 0xc7, 0xde, 0x67, 0x03, 0x34, 0xce, 0xb3, 0x3b, 0x51, 0x3b, 0x9a, 0xc4, 0x70, 0x08, 0xea,
    0xf9, 0x2d, 0x5d, 0x8a, 0x59, 0xcb, 0x10, 0x05, 0x4c, 0xbd, 0x80, 0xbc, 0x43, 0x9e, 0x78, 0x86,
    0x99, 0xb3, 0x75, 0xa1, 0x9d, 0xa0, 0x05, 0x76, 0x42, 0x44, 0x99, 0x1b, 0x60, 0xe2, 0x07, 0xcc,
    0xf5, 0x02, 0x14, 0xf9, 0x78, 0x2c, 0xbc, 0xae, 0x39, 0x7f, 0x73, 0xe8, 0x44, 0x20, 0x43, 0x09,
    0xf4, 0xbe, 0x18, 0x60, 0x67, 0xc8, 0xdd, 0x46, 0x74, 0x46, 0x5f, 0x8a, 0x16, 0x0a, 0x33, 0x0e,
    0xd8, 0xf6, 0xb2, 0xb0, 0x2b, 0x5b, 0xab, 0xfc, 0xfc, 0xb7, 0xea, 0x67, 0x49, 0x60, 0xb0, 0x7e,
    0x75, 0xd3, 0x29, 0x39, 0x4d, 0x6f, 0x31, 0xfc, 0xdb, 0xde, 0x02, 0x50, 0x3e, 0x97, 0x6f, 0x07,
    0x8f, 0x40, 0x35, 0x57, 0x53, 0x3e, 0xf6, 0x75, 0x1f, 0xea, 0x8d, 0x0b, 0x27, 0xca, 0x43, 0x91,
    0x05, 0xdb, 0xa0, 0x42, 0xe3, 0x09, 0x7b, 0x8f, 0x52, 0x2c, 0x4a, 0x56, 0x9d, 0xfc, 0xdc, 0xfb,
    0xb1, 0x09, 0x36, 0xce, 0xf8, 0x28, 0xc3, 0xc7, 0xa0, 0xac, 0xb4, 0x54, 0x99, 0x5d, 0x6b, 0x79,
    0xdc, 0x2d, 0x65, 0x4a, 0x95, 0xc8, 0xf8, 0xf0, 0x00, 0x54, 0xbc, 0x00, 0x91, 0xc8, 0x25, 0xf2,
    0x4e, 0xd5, 0x41, 0x6d, 0x7e, 0xd3, 0x29, 0x0f, 0x79, 0xcc, 0x3e, 0x76, 0xca, 0x02, 0xb4, 0xc7,
    0xf0, 0x1e, 0x68, 0x90, 0x88, 0x30, 0x82, 0x42, 0xd5, 0x89, 0x56, 0x43, 0x74, 0xa0, 0xae, 0xa2,
    0xb2, 0x09, 0xf0, 0x3e, 0x10, 0x2d, 0x91, 0xc3, 0x96, 0x31, 0xd7, 0x04, 0xb3, 0xc9, 0x01, 0x31,
    0x47, 0x8a, 0xeb, 0x80, 0xba, 0xc6, 0x25, 0xe3, 0xd6, 0xfa, 0xaa, 0x77, 0xf9, 0x54, 0x22, 0xcb,
    0x3e, 0x1e, 0xec, 0x70, 0xef, 0xf3, 0x9b, 0x4e, 0xed, 0x34, 0x93, 0xb2, 0x8f, 0x9d, 0x5a, 0xae,
    0x6b, 0x8f, 0xe1, 0x29, 0x68, 0x6a, 0x9a, 0x7c, 0x3f, 0x5a, 0x1b, 0x42, 0xb5, 0x6d, 0xc9, 0xe5,
    0xb1, 0xb2, 0xe5, 0xb1, 0x5e, 0x67, 0xcb, 0x33, 0xa8, 0x70, 0xd9, 0xcb, 0x6f, 0x1d, 0xc3, 0xa9,
    0xe7, 0x5a, 0x1c, 0x85, 0xcf, 0x40, 0x33, 0xc2, 0x1f, 0x98, 0x9b, 0x0f, 0x2b, 0x6d, 0x6d, 0xde,
    0x69, 0xbc, 0x1b, 0x3c, 0xad, 0xd8, 0x14, 0xf8, 0x04, 0x00, 0x4d, 0xa3, 0x7c, 0x27, 0x0d, 0x2d,
    0x83, 0x1b, 0x11, 0xd7, 0xd2, 0x44, 0x2a, 0x77, 0x33, 0xc2, 0xd3, 0x34, 0x23, 0x43, 0x60, 0xea,
    0xd3, 0x5c, 0xe8, 0xe5, 0x83, 0x5d, 0x15, 0x8f, 0xb5, 0x57, 0x0c, 0x76, 0x91, 0xad, 0x46, 0xfc,
    0x97, 0x6b, 0x06, 0xfe, 0x70, 0xcd, 0x5e, 0x80, 0xff, 0x17, 0xd6, 0x6c, 0x49, 0x3f, 0xb7, 0x57,
    0x13, 0xf6, 0xba, 0xda, 0xde, 0x2d, 0x0a, 0x65, 0x1e, 0xb3, 0x41, 0x4c, 0x31, 0x9d, 0x85, 0x8c,
    0xba, 0x01, 0xa2, 0x41, 0x6b, 0xab, 0x6b, 0x1c, 0x6e, 0xc9, 0x41, 0x74, 0x64, 0xfc, 0x04, 0xd1,
    0x00, 0xee, 0x82, 0x0a, 0x4a, 0x12, 0x49, 0xa9, 0x0b, 0x4a, 0x19, 0x25, 0x09, 0x87, 0x06, 0xaf,
    0xae, 0xe6, 0xa6, 0x71, 0x3d, 0x37, 0x8d, 0xef, 0x73, 0xd3, 0xb8, 0xbc, 0x35, 0x4b, 0xd7, 0xb7,
    0x66, 0xe9, 0xeb, 0xad, 0x59, 0x7a, 0xf3, 0xc8, 0x27, 0x2c, 0x98, 0x8d, 0x2c, 0x2f, 0x9e, 0xf6,
    0xf5, 0xbf, 0xf5, 0xe2, 0xa7, 0xfc, 0xb6, 0x2c, 0x7f, 0x95, 0x46, 0x9b, 0x22, 0xfe, 0xf0, 0x67,
    0x00, 0x00, 0x00, 0xff, 0xff, 0x88, 0xe8, 0x4c, 0x4d, 0xb0, 0x06, 0x00, 0x00,
}

func (m *ABCIResponses) Marshal() (dAtA []byte, err error) {
@@ -493,21 +474,9 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) {
    _ = i
    var l int
    _ = l
    if m.BeginBlock != nil {
    if m.FinalizeBlock != nil {
        {
            size, err := m.BeginBlock.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
            i -= size
            i = encodeVarintTypes(dAtA, i, uint64(size))
        }
        i--
        dAtA[i] = 0x1a
    }
    if m.EndBlock != nil {
        {
            size, err := m.EndBlock.MarshalToSizedBuffer(dAtA[:i])
            size, err := m.FinalizeBlock.MarshalToSizedBuffer(dAtA[:i])
            if err != nil {
                return 0, err
            }
@@ -517,20 +486,6 @@ func (m *ABCIResponses) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i--
        dAtA[i] = 0x12
    }
    if len(m.DeliverTxs) > 0 {
        for iNdEx := len(m.DeliverTxs) - 1; iNdEx >= 0; iNdEx-- {
            {
                size, err := m.DeliverTxs[iNdEx].MarshalToSizedBuffer(dAtA[:i])
                if err != nil {
                    return 0, err
                }
                i -= size
                i = encodeVarintTypes(dAtA, i, uint64(size))
            }
            i--
            dAtA[i] = 0xa
        }
    }
    return len(dAtA) - i, nil
}
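The magic constants in the hand-rolled marshaller are protobuf key bytes, computed as (field_number << 3) | wire_type. A quick standalone check that 0x12 is field 2 with the length-delimited wire type, which is why FinalizeBlock inherits the byte the removed EndBlock used, while the dropped 0x1a and 0xa belonged to fields 3 and 1:

package main

import "fmt"

func main() {
    // key byte = (field number << 3) | wire type; wire type 2 is length-delimited
    key := func(field, wire uint8) uint8 { return field<<3 | wire }
    fmt.Printf("0x%x 0x%x 0x%x\n", key(1, 2), key(2, 2), key(3, 2)) // 0xa 0x12 0x1a
}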

@@ -747,12 +702,12 @@ func (m *State) MarshalToSizedBuffer(dAtA []byte) (int, error) {
        i--
        dAtA[i] = 0x32
    }
    n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
    if err10 != nil {
        return 0, err10
    n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.LastBlockTime, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.LastBlockTime):])
    if err9 != nil {
        return 0, err9
    }
    i -= n10
    i = encodeVarintTypes(dAtA, i, uint64(n10))
    i -= n9
    i = encodeVarintTypes(dAtA, i, uint64(n9))
    i--
    dAtA[i] = 0x2a
    {
@@ -807,18 +762,8 @@ func (m *ABCIResponses) Size() (n int) {
    }
    var l int
    _ = l
    if len(m.DeliverTxs) > 0 {
        for _, e := range m.DeliverTxs {
            l = e.Size()
            n += 1 + l + sovTypes(uint64(l))
        }
    }
    if m.EndBlock != nil {
        l = m.EndBlock.Size()
        n += 1 + l + sovTypes(uint64(l))
    }
    if m.BeginBlock != nil {
        l = m.BeginBlock.Size()
    if m.FinalizeBlock != nil {
        l = m.FinalizeBlock.Size()
        n += 1 + l + sovTypes(uint64(l))
    }
    return n
@@ -957,43 +902,9 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error {
            return fmt.Errorf("proto: ABCIResponses: illegal tag %d (wire type %d)", fieldNum, wire)
        }
        switch fieldNum {
        case 1:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field DeliverTxs", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowTypes
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthTypes
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthTypes
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            m.DeliverTxs = append(m.DeliverTxs, &types.ResponseDeliverTx{})
            if err := m.DeliverTxs[len(m.DeliverTxs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 2:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field EndBlock", wireType)
                return fmt.Errorf("proto: wrong wireType = %d for field FinalizeBlock", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
@@ -1020,46 +931,10 @@ func (m *ABCIResponses) Unmarshal(dAtA []byte) error {
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.EndBlock == nil {
                m.EndBlock = &types.ResponseEndBlock{}
            if m.FinalizeBlock == nil {
                m.FinalizeBlock = &types.ResponseFinalizeBlock{}
            }
            if err := m.EndBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
        case 3:
            if wireType != 2 {
                return fmt.Errorf("proto: wrong wireType = %d for field BeginBlock", wireType)
            }
            var msglen int
            for shift := uint(0); ; shift += 7 {
                if shift >= 64 {
                    return ErrIntOverflowTypes
                }
                if iNdEx >= l {
                    return io.ErrUnexpectedEOF
                }
                b := dAtA[iNdEx]
                iNdEx++
                msglen |= int(b&0x7F) << shift
                if b < 0x80 {
                    break
                }
            }
            if msglen < 0 {
                return ErrInvalidLengthTypes
            }
            postIndex := iNdEx + msglen
            if postIndex < 0 {
                return ErrInvalidLengthTypes
            }
            if postIndex > l {
                return io.ErrUnexpectedEOF
            }
            if m.BeginBlock == nil {
                m.BeginBlock = &types.ResponseBeginBlock{}
            }
            if err := m.BeginBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
            if err := m.FinalizeBlock.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
                return err
            }
            iNdEx = postIndex
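The shift loops in these Unmarshal cases are ordinary base-128 varint decoding. The same idea isolated into a standalone helper, for readers who would rather not trace it through the generated control flow:

// decodeUvarint reads a protobuf varint from b: seven payload bits per byte,
// least-significant group first, with the high bit marking continuation.
func decodeUvarint(b []byte) (v uint64, n int, ok bool) {
    for shift := uint(0); shift < 64; shift += 7 {
        if n >= len(b) {
            return 0, 0, false // truncated input
        }
        c := b[n]
        n++
        v |= uint64(c&0x7F) << shift
        if c < 0x80 {
            return v, n, true
        }
    }
    return 0, 0, false // more than 64 bits: overflow
}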

@@ -15,9 +15,7 @@ import "google/protobuf/timestamp.proto";
// of the various ABCI calls during block processing.
// It is persisted to disk for each height before calling Commit.
message ABCIResponses {
  repeated tendermint.abci.ResponseDeliverTx deliver_txs = 1;
  tendermint.abci.ResponseEndBlock end_block = 2;
  tendermint.abci.ResponseBeginBlock begin_block = 3;
  tendermint.abci.ResponseFinalizeBlock finalize_block = 2;
}

// ValidatorsInfo represents the latest validator set, or the last height it changed

@@ -18,9 +18,7 @@ type AppConnConsensus interface {

    InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)

    BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
    DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error)
    EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
    FinalizeBlockSync(context.Context, types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error)
    CommitSync(context.Context) (*types.ResponseCommit, error)
}
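After this change the consensus connection makes a single synchronous call per block. A sketch of the call-site shape, with conn and block as assumptions and the request fields trimmed (the real executor also fills Header, LastCommitInfo, and ByzantineValidators, as shown further down):

package main

import (
    "context"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
)

// finalize submits the whole block to the app in one round trip.
func finalize(ctx context.Context, conn proxy.AppConnConsensus, block *types.Block) (*abci.ResponseFinalizeBlock, error) {
    return conn.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{
        Hash: block.Hash(),
        Txs:  block.Txs.ToSliceOfBytes(),
    })
}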

@@ -80,28 +78,14 @@ func (app *appConnConsensus) InitChainSync(
    return app.appConn.InitChainSync(ctx, req)
}

func (app *appConnConsensus) BeginBlockSync(
    ctx context.Context,
    req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {
    return app.appConn.BeginBlockSync(ctx, req)
}

func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) {
    return app.appConn.DeliverTxAsync(ctx, req)
}

func (app *appConnConsensus) EndBlockSync(
    ctx context.Context,
    req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {
    return app.appConn.EndBlockSync(ctx, req)
}

func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
    return app.appConn.CommitSync(ctx)
}

func (app *appConnConsensus) FinalizeBlockSync(ctx context.Context, req types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
    return app.appConn.FinalizeBlockSync(ctx, req)
}

//------------------------------------------------
// Implements AppConnMempool (subset of abcicli.Client)

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -17,29 +17,6 @@ type AppConnConsensus struct {
    mock.Mock
}

// BeginBlockSync provides a mock function with given fields: _a0, _a1
func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseBeginBlock
    if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseBeginBlock)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// CommitSync provides a mock function with given fields: _a0
func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
    ret := _m.Called(_a0)
@@ -63,52 +40,6 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm
    return r0, r1
}

// DeliverTxAsync provides a mock function with given fields: _a0, _a1
func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *abcicli.ReqRes
    if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*abcicli.ReqRes)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// EndBlockSync provides a mock function with given fields: _a0, _a1
func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseEndBlock
    if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseEndBlock)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// Error provides a mock function with given fields:
func (_m *AppConnConsensus) Error() error {
    ret := _m.Called()
@@ -123,6 +54,29 @@ func (_m *AppConnConsensus) Error() error {
    return r0
}

// FinalizeBlockSync provides a mock function with given fields: _a0, _a1
func (_m *AppConnConsensus) FinalizeBlockSync(_a0 context.Context, _a1 types.RequestFinalizeBlock) (*types.ResponseFinalizeBlock, error) {
    ret := _m.Called(_a0, _a1)

    var r0 *types.ResponseFinalizeBlock
    if rf, ok := ret.Get(0).(func(context.Context, types.RequestFinalizeBlock) *types.ResponseFinalizeBlock); ok {
        r0 = rf(_a0, _a1)
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.ResponseFinalizeBlock)
        }
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(context.Context, types.RequestFinalizeBlock) error); ok {
        r1 = rf(_a0, _a1)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}
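In a test, the regenerated mock is programmed through the standard testify expectations it is built on. A sketch, assuming the proxy/mocks package path and using an empty response as a placeholder:

package mocks_test

import (
    "context"
    "testing"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/proxy/mocks"
)

func TestFinalizeBlockSyncMock(t *testing.T) {
    m := &mocks.AppConnConsensus{}
    // Program the expectation: any context and any request return an empty response.
    m.On("FinalizeBlockSync", mock.Anything, mock.Anything).
        Return(&abci.ResponseFinalizeBlock{}, nil)

    res, err := m.FinalizeBlockSync(context.Background(), abci.RequestFinalizeBlock{})
    require.NoError(t, err)
    require.NotNil(t, res)
    m.AssertExpectations(t)
}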

// InitChainSync provides a mock function with given fields: _a0, _a1
func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
    ret := _m.Called(_a0, _a1)

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -120,7 +120,7 @@ func TestBroadcastEvidence_DuplicateVoteEvidence(t *testing.T) {
    // previous versions of this test used a shared fixture with
    // other tests, and in this version we give it a little time
    // for the node to make progress before running the test
    time.Sleep(100 * time.Millisecond)
    time.Sleep(10 * time.Millisecond)

    chainID := config.ChainID()

@@ -55,7 +55,8 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re
    if res.CheckTx.IsErr() {
        return &res, nil
    }
    res.DeliverTx = a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx})
    fb := a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}})
    res.DeliverTx = *fb.Txs[0]
    res.Height = -1 // TODO
    return &res, nil
}
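The mock client emulates a one-transaction block: the tx goes in as the only element of Txs, and the single result comes back out. The same shape in isolation, assuming an app value that implements the updated Application interface; deliverOne is a hypothetical helper:

// deliverOne wraps a single tx in a one-element block and unwraps its result.
func deliverOne(app abci.Application, tx []byte) abci.ResponseDeliverTx {
    fb := app.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}})
    return *fb.Txs[0] // exactly one tx in, exactly one result out
}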
@@ -64,7 +65,7 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res
    c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
    // and this gets written in a background thread...
    if !c.IsErr() {
        go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
        go func() { a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }()
    }
    return &ctypes.ResultBroadcastTx{
        Code: c.Code,
@@ -79,7 +80,7 @@ func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.Resu
    c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx})
    // and this gets written in a background thread...
    if !c.IsErr() {
        go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }()
        go func() { a.App.FinalizeBlock(abci.RequestFinalizeBlock{Txs: [][]byte{tx}}) }()
    }
    return &ctypes.ResultBroadcastTx{
        Code: c.Code,

@@ -3,7 +3,6 @@ package mock_test
import (
    "context"
    "testing"
    "time"

    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
@@ -24,8 +23,6 @@ func TestStatus(t *testing.T) {
        LatestAppHash:      bytes.HexBytes("app"),
        LatestBlockHeight:  10,
        MaxPeerBlockHeight: 20,
        TotalSyncedTime:    time.Second,
        RemainingTime:      time.Minute,
    },
    }},
}
@@ -39,8 +36,6 @@ func TestStatus(t *testing.T) {
    assert.EqualValues("block", status.SyncInfo.LatestBlockHash)
    assert.EqualValues(10, status.SyncInfo.LatestBlockHeight)
    assert.EqualValues(20, status.SyncInfo.MaxPeerBlockHeight)
    assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime)
    assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime)

    // make sure recorder works properly
    require.Equal(1, len(r.Calls))
@@ -54,6 +49,4 @@ func TestStatus(t *testing.T) {
    assert.EqualValues("block", st.SyncInfo.LatestBlockHash)
    assert.EqualValues(10, st.SyncInfo.LatestBlockHeight)
    assert.EqualValues(20, st.SyncInfo.MaxPeerBlockHeight)
    assert.EqualValues(time.Second, status.SyncInfo.TotalSyncedTime)
    assert.EqualValues(time.Minute, status.SyncInfo.RemainingTime)
}

@@ -1,4 +1,4 @@
// Code generated by mockery 2.7.4. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -163,18 +163,17 @@ func (env *Environment) BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*
    }

    var totalGasUsed int64
    for _, tx := range results.GetDeliverTxs() {
    for _, tx := range results.FinalizeBlock.GetTxs() {
        totalGasUsed += tx.GetGasUsed()
    }

    return &ctypes.ResultBlockResults{
        Height:                height,
        TxsResults:            results.DeliverTxs,
        TxsResults:            results.FinalizeBlock.Txs,
        TotalGasUsed:          totalGasUsed,
        BeginBlockEvents:      results.BeginBlock.Events,
        EndBlockEvents:        results.EndBlock.Events,
        ValidatorUpdates:      results.EndBlock.ValidatorUpdates,
        ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates,
        FinalizeBlockEvents:   results.FinalizeBlock.Events,
        ValidatorUpdates:      results.FinalizeBlock.ValidatorUpdates,
        ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates,
    }, nil
}

@@ -71,13 +71,13 @@ func TestBlockchainInfo(t *testing.T) {

func TestBlockResults(t *testing.T) {
    results := &tmstate.ABCIResponses{
        DeliverTxs: []*abci.ResponseDeliverTx{
            {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10},
            {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5},
            {Code: 1, Log: "not ok", GasUsed: 0},
        FinalizeBlock: &abci.ResponseFinalizeBlock{
            Txs: []*abci.ResponseDeliverTx{
                {Code: 0, Data: []byte{0x01}, Log: "ok", GasUsed: 10},
                {Code: 0, Data: []byte{0x02}, Log: "ok", GasUsed: 5},
                {Code: 1, Log: "not ok", GasUsed: 0},
            },
        },
        EndBlock:   &abci.ResponseEndBlock{},
        BeginBlock: &abci.ResponseBeginBlock{},
    }

    env := &Environment{}
@@ -96,12 +96,11 @@ func TestBlockResults(t *testing.T) {
        {101, true, nil},
        {100, false, &ctypes.ResultBlockResults{
            Height:                100,
            TxsResults:            results.DeliverTxs,
            TxsResults:            results.FinalizeBlock.Txs,
            TotalGasUsed:          15,
            BeginBlockEvents:      results.BeginBlock.Events,
            EndBlockEvents:        results.EndBlock.Events,
            ValidatorUpdates:      results.EndBlock.ValidatorUpdates,
            ConsensusParamUpdates: results.EndBlock.ConsensusParamUpdates,
            FinalizeBlockEvents:   results.FinalizeBlock.Events,
            ValidatorUpdates:      results.FinalizeBlock.ValidatorUpdates,
            ConsensusParamUpdates: results.FinalizeBlock.ConsensusParamUpdates,
        }},
    }

@@ -71,8 +71,6 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err
        EarliestBlockTime:  time.Unix(0, earliestBlockTimeNano),
        MaxPeerBlockHeight: env.FastSyncReactor.GetMaxPeerBlockHeight(),
        CatchingUp:         env.ConsensusReactor.WaitSync(),
        TotalSyncedTime:    env.FastSyncReactor.GetTotalSyncedTime(),
        RemainingTime:      env.FastSyncReactor.GetRemainingSyncTime(),
    },
    ValidatorInfo: validatorInfo,
}

@@ -63,8 +63,7 @@ type ResultBlockResults struct {
    Height                int64                     `json:"height"`
    TxsResults            []*abci.ResponseDeliverTx `json:"txs_results"`
    TotalGasUsed          int64                     `json:"total_gas_used"`
    BeginBlockEvents      []abci.Event              `json:"begin_block_events"`
    EndBlockEvents        []abci.Event              `json:"end_block_events"`
    FinalizeBlockEvents   []abci.Event              `json:"finalize_block_events"`
    ValidatorUpdates      []abci.ValidatorUpdate    `json:"validator_updates"`
    ConsensusParamUpdates *tmproto.ConsensusParams  `json:"consensus_param_updates"`
}
@@ -98,9 +97,6 @@ type SyncInfo struct {
    MaxPeerBlockHeight int64 `json:"max_peer_block_height"`

    CatchingUp bool `json:"catching_up"`

    TotalSyncedTime time.Duration `json:"total_synced_time"`
    RemainingTime   time.Duration `json:"remaining_time"`
}

// Info about the node's validator

@@ -1330,12 +1330,6 @@ components:
        catching_up:
          type: boolean
          example: false
        total_synced_time:
          type: string
          example: "1000000000"
        remaining_time:
          type: string
          example: "0"
    ValidatorInfo:
      type: object
      properties:

@@ -18,6 +18,7 @@ import (
    "sync"

    "github.com/google/orderedcode"
    "github.com/pkg/errors"
    dbm "github.com/tendermint/tm-db"
)

@@ -394,7 +395,7 @@ func Migrate(ctx context.Context, db dbm.DB) error {

    // check the error results
    if len(errs) != 0 {
        return fmt.Errorf("encountered errors during migration: %v", errStrs)
        return errors.Errorf("encountered errors during migration: %v", errStrs)
    }

    return nil

@@ -177,7 +177,7 @@ func (blockExec *BlockExecutor) ApplyBlock(
    fail.Fail() // XXX

    // validate the validator updates and convert to tendermint types
    abciValUpdates := abciResponses.EndBlock.ValidatorUpdates
    abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates
    err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator)
    if err != nil {
        return state, fmt.Errorf("error in validator updates: %v", err)
@@ -198,7 +198,7 @@ func (blockExec *BlockExecutor) ApplyBlock(
    }

    // Lock mempool, commit app state, update mempool.
    appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs)
    appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.FinalizeBlock.Txs)
    if err != nil {
        return state, fmt.Errorf("commit failed for application: %v", err)
    }
@@ -299,27 +299,29 @@ func execBlockOnProxyApp(
) (*tmstate.ABCIResponses, error) {
    var validTxs, invalidTxs = 0, 0

    txIndex := 0
    abciResponses := new(tmstate.ABCIResponses)
    abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{}
    dtxs := make([]*abci.ResponseDeliverTx, len(block.Txs))
    abciResponses.DeliverTxs = dtxs
    abciResponses.FinalizeBlock.Txs = dtxs

    // Execute transactions and get hash.
    proxyCb := func(req *abci.Request, res *abci.Response) {
        if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
            // TODO: make use of res.Log
            // TODO: make use of this info
            // Blocks may include invalid txs.
            txRes := r.DeliverTx
            if txRes.Code == abci.CodeTypeOK {
                validTxs++
            } else {
                logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log)
                invalidTxs++
            }
        if r, ok := res.Value.(*abci.Response_FinalizeBlock); ok {
            for i, tx := range r.FinalizeBlock.Txs {

            abciResponses.DeliverTxs[txIndex] = txRes
            txIndex++
                // TODO: make use of res.Log
                // TODO: make use of this info
                // Blocks may include invalid txs.
                txRes := tx
                if txRes.Code == abci.CodeTypeOK {
                    validTxs++
                } else {
                    logger.Debug("invalid tx", "code", txRes.Code, "log", txRes.Log)
                    invalidTxs++
                }

                abciResponses.FinalizeBlock.Txs[i] = txRes
            }
        }
    }
    proxyAppConn.SetResponseCallback(proxyCb)
@@ -340,34 +342,17 @@ func execBlockOnProxyApp(
        return nil, errors.New("nil header")
    }

    abciResponses.BeginBlock, err = proxyAppConn.BeginBlockSync(
        ctx,
        abci.RequestBeginBlock{
            Hash:                block.Hash(),
            Header:              *pbh,
            LastCommitInfo:      commitInfo,
            ByzantineValidators: byzVals,
        },
    )
    abciResponses.FinalizeBlock, err = proxyAppConn.FinalizeBlockSync(ctx, abci.RequestFinalizeBlock{
        Hash:                block.Hash(),
        Header:              *pbh,
        LastCommitInfo:      commitInfo,
        ByzantineValidators: byzVals,
        Txs:                 block.Txs.ToSliceOfBytes(),
    })
    if err != nil {
        logger.Error("error in proxyAppConn.BeginBlock", "err", err)
        logger.Error("error in proxyAppConn.FinalizeBlock", "err", err)
        return nil, err
    }

    // run txs of block
    for _, tx := range block.Txs {
        _, err = proxyAppConn.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: tx})
        if err != nil {
            return nil, err
        }
    }

    abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(ctx, abci.RequestEndBlock{Height: block.Height})
    if err != nil {
        logger.Error("error in proxyAppConn.EndBlock", "err", err)
        return nil, err
    }

    logger.Info("executed block", "height", block.Height, "num_valid_txs", validTxs, "num_invalid_txs", invalidTxs)
    return abciResponses, nil
}
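The per-transaction bookkeeping that used to run once per DeliverTx callback now folds into a single pass over the aggregated response. The same accounting as a standalone sketch:

// countTxs tallies valid and invalid transaction results the way the
// callback above does, but over a response value directly.
func countTxs(res *abci.ResponseFinalizeBlock) (valid, invalid int) {
    for _, tx := range res.Txs {
        if tx.Code == abci.CodeTypeOK {
            valid++
        } else {
            invalid++
        }
    }
    return valid, invalid
}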
@@ -467,9 +452,9 @@ func updateState(
    // Update the params with the latest abciResponses.
    nextParams := state.ConsensusParams
    lastHeightParamsChanged := state.LastHeightConsensusParamsChanged
    if abciResponses.EndBlock.ConsensusParamUpdates != nil {
    if abciResponses.FinalizeBlock.ConsensusParamUpdates != nil {
        // NOTE: must not mutate s.ConsensusParams
        nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.EndBlock.ConsensusParamUpdates)
        nextParams = state.ConsensusParams.UpdateConsensusParams(abciResponses.FinalizeBlock.ConsensusParamUpdates)
        err := nextParams.ValidateConsensusParams()
        if err != nil {
            return state, fmt.Errorf("error updating consensus params: %v", err)
@@ -515,19 +500,17 @@ func fireEvents(
    validatorUpdates []*types.Validator,
) {
    if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{
        Block:            block,
        BlockID:          blockID,
        ResultBeginBlock: *abciResponses.BeginBlock,
        ResultEndBlock:   *abciResponses.EndBlock,
        Block:               block,
        BlockID:             blockID,
        ResultFinalizeBlock: *abciResponses.FinalizeBlock,
    }); err != nil {
        logger.Error("failed publishing new block", "err", err)
    }

    if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{
        Header:           block.Header,
        NumTxs:           int64(len(block.Txs)),
        ResultBeginBlock: *abciResponses.BeginBlock,
        ResultEndBlock:   *abciResponses.EndBlock,
        Header:              block.Header,
        NumTxs:              int64(len(block.Txs)),
        ResultFinalizeBlock: *abciResponses.FinalizeBlock,
    }); err != nil {
        logger.Error("failed publishing new block header", "err", err)
    }
@@ -543,14 +526,16 @@ func fireEvents(
        }
    }

    for i, tx := range block.Data.Txs {
        if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{
            Height: block.Height,
            Index:  uint32(i),
            Tx:     tx,
            Result: *(abciResponses.DeliverTxs[i]),
        }}); err != nil {
            logger.Error("failed publishing event TX", "err", err)
    if len(abciResponses.FinalizeBlock.Txs) != 0 {
        for i, tx := range block.Data.Txs {
            if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{
                Height: block.Height,
                Index:  uint32(i),
                Tx:     tx,
                Result: *(abciResponses.FinalizeBlock.Txs[i]),
            }}); err != nil {
                logger.Error("failed publishing event TX", "err", err)
            }
        }
    }

@@ -584,7 +569,7 @@ func ExecCommitBlock(

    // the BlockExecutor condition is used for the final block replay process.
    if be != nil {
        abciValUpdates := abciResponses.EndBlock.ValidatorUpdates
        abciValUpdates := abciResponses.FinalizeBlock.ValidatorUpdates
        err = validateValidatorUpdates(abciValUpdates, s.ConsensusParams.Validator)
        if err != nil {
            logger.Error("err", err)

@@ -141,8 +141,7 @@ func makeHeaderPartsResponsesValPubKeyChange(

    block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
    abciResponses := &tmstate.ABCIResponses{
        BeginBlock: &abci.ResponseBeginBlock{},
        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
        FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil},
    }
    // If the pubkey is new, remove the old and add the new.
    _, val := state.NextValidators.GetByIndex(0)
@@ -155,7 +154,7 @@ func makeHeaderPartsResponsesValPubKeyChange(
        if err != nil {
            panic(err)
        }
        abciResponses.EndBlock = &abci.ResponseEndBlock{
        abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{
            ValidatorUpdates: []abci.ValidatorUpdate{
                {PubKey: vPbPk, Power: 0},
                {PubKey: pbPk, Power: 10},
@@ -173,8 +172,7 @@ func makeHeaderPartsResponsesValPowerChange(

    block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
    abciResponses := &tmstate.ABCIResponses{
        BeginBlock: &abci.ResponseBeginBlock{},
        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
        FinalizeBlock: &abci.ResponseFinalizeBlock{ValidatorUpdates: nil},
    }

    // If the pubkey is new, remove the old and add the new.
@@ -184,7 +182,7 @@ func makeHeaderPartsResponsesValPowerChange(
        if err != nil {
            panic(err)
        }
        abciResponses.EndBlock = &abci.ResponseEndBlock{
        abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{
            ValidatorUpdates: []abci.ValidatorUpdate{
                {PubKey: vPbPk, Power: power},
            },
@@ -202,8 +200,7 @@ func makeHeaderPartsResponsesParams(
    block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
    pbParams := params.ToProto()
    abciResponses := &tmstate.ABCIResponses{
        BeginBlock: &abci.ResponseBeginBlock{},
        EndBlock:   &abci.ResponseEndBlock{ConsensusParamUpdates: &pbParams},
        FinalizeBlock: &abci.ResponseFinalizeBlock{ConsensusParamUpdates: &pbParams},
    }
    return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses
}
@@ -274,22 +271,15 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
    return abci.ResponseInfo{}
}

func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
func (app *testApp) FinalizeBlock(req abci.RequestFinalizeBlock) abci.ResponseFinalizeBlock {
    app.CommitVotes = req.LastCommitInfo.Votes
    app.ByzantineValidators = req.ByzantineValidators
    return abci.ResponseBeginBlock{}
}

func (app *testApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
    return abci.ResponseEndBlock{
    return abci.ResponseFinalizeBlock{
        ValidatorUpdates: app.ValidatorUpdates,
        ConsensusParamUpdates: &tmproto.ConsensusParams{
            Version: &tmproto.VersionParams{
                AppVersion: 1}}}
}

func (app *testApp) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
    return abci.ResponseDeliverTx{Events: []abci.Event{}}
                AppVersion: 1}},
    }
}

func (app *testApp) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
@@ -65,15 +65,10 @@ func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error {
    }

    // 2. index BeginBlock events
    if err := idx.indexEvents(batch, bh.ResultBeginBlock.Events, "begin_block", height); err != nil {
    if err := idx.indexEvents(batch, bh.ResultFinalizeBlock.Events, "begin_block", height); err != nil {
        return fmt.Errorf("failed to index BeginBlock events: %w", err)
    }

    // 3. index EndBlock events
    if err := idx.indexEvents(batch, bh.ResultEndBlock.Events, "end_block", height); err != nil {
        return fmt.Errorf("failed to index EndBlock events: %w", err)
    }

    return batch.WriteSync()
}

@@ -2,7 +2,6 @@ package kv_test

import (
    "context"
    "fmt"
    "testing"

    "github.com/stretchr/testify/require"
@@ -19,7 +18,7 @@ func TestBlockIndexer(t *testing.T) {

    require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{
        Header: types.Header{Height: 1},
        ResultBeginBlock: abci.ResponseBeginBlock{
        ResultFinalizeBlock: abci.ResponseFinalizeBlock{
            Events: []abci.Event{
                {
                    Type: "begin_event",
@@ -33,34 +32,15 @@ func TestBlockIndexer(t *testing.T) {
                },
            },
        },
        ResultEndBlock: abci.ResponseEndBlock{
            Events: []abci.Event{
                {
                    Type: "end_event",
                    Attributes: []abci.EventAttribute{
                        {
                            Key:   "foo",
                            Value: "100",
                            Index: true,
                        },
                    },
                },
            },
        },
    }))

    for i := 2; i < 12; i++ {
        var index bool
        if i%2 == 0 {
            index = true
        }

        require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{
            Header: types.Header{Height: int64(i)},
            ResultBeginBlock: abci.ResponseBeginBlock{
            ResultFinalizeBlock: abci.ResponseFinalizeBlock{
                Events: []abci.Event{
                    {
                        Type: "begin_event",
                        Type: "finalize_event",
                        Attributes: []abci.EventAttribute{
                            {
                                Key: "proposer",
@@ -71,20 +51,6 @@ func TestBlockIndexer(t *testing.T) {
                        },
                    },
                },
                ResultEndBlock: abci.ResponseEndBlock{
                    Events: []abci.Event{
                        {
                            Type: "end_event",
                            Attributes: []abci.EventAttribute{
                                {
                                    Key:   "foo",
                                    Value: fmt.Sprintf("%d", i),
                                    Index: index,
                                },
                            },
                        },
                    },
                },
            },
        }))
    }

@@ -32,10 +32,10 @@ func TestBlockFuncs(t *testing.T) {

    require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{
        Header: types.Header{Height: 1},
        ResultBeginBlock: abci.ResponseBeginBlock{
        ResultFinalizeBlock: abci.ResponseFinalizeBlock{
            Events: []abci.Event{
                {
                    Type: "begin_event",
                    Type: "finalize_event",
                    Attributes: []abci.EventAttribute{
                        {
                            Key: "proposer",
@@ -46,20 +46,6 @@ func TestBlockFuncs(t *testing.T) {
                        },
                    },
                },
                ResultEndBlock: abci.ResponseEndBlock{
                    Events: []abci.Event{
                        {
                            Type: "end_event",
                            Attributes: []abci.EventAttribute{
                                {
                                    Key:   "foo",
                                    Value: "100",
                                    Index: true,
                                },
                            },
                        },
                    },
                },
            },
    }))

    b, e := indexer.HasBlock(1)
@@ -67,17 +53,12 @@ func TestBlockFuncs(t *testing.T) {
    assert.True(t, b)

    for i := 2; i < 12; i++ {
        var index bool
        if i%2 == 0 {
            index = true
        }

        require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{
            Header: types.Header{Height: int64(i)},
            ResultBeginBlock: abci.ResponseBeginBlock{
            ResultFinalizeBlock: abci.ResponseFinalizeBlock{
                Events: []abci.Event{
                    {
                        Type: "begin_event",
                        Type: "finalize_event",
                        Attributes: []abci.EventAttribute{
                            {
                                Key: "proposer",
@@ -88,20 +69,6 @@ func TestBlockFuncs(t *testing.T) {
                            },
                        },
                    },
                    ResultEndBlock: abci.ResponseEndBlock{
                        Events: []abci.Event{
                            {
                                Type: "end_event",
                                Attributes: []abci.EventAttribute{
                                    {
                                        Key:   "foo",
                                        Value: fmt.Sprintf("%d", i),
                                        Index: index,
                                    },
                                },
                            },
                        },
                    },
                },
        }))
    }

@@ -59,16 +59,9 @@ func (es *EventSink) IndexBlockEvents(h types.EventDataNewBlockHeader) error {
    sqlStmt = sqlStmt.
        Values(types.BlockHeightKey, fmt.Sprint(h.Header.Height), h.Header.Height, "", ts, es.chainID)

    // index begin_block events
    // index finalize_block events
    sqlStmt, err := indexBlockEvents(
        sqlStmt, h.ResultBeginBlock.Events, types.EventTypeBeginBlock, h.Header.Height, ts, es.chainID)
    if err != nil {
        return err
    }

    // index end_block events
    sqlStmt, err = indexBlockEvents(
        sqlStmt, h.ResultEndBlock.Events, types.EventTypeEndBlock, h.Header.Height, ts, es.chainID)
        sqlStmt, h.ResultFinalizeBlock.Events, types.EventTypeBeginBlock, h.Header.Height, ts, es.chainID)
    if err != nil {
        return err
    }
@@ -110,7 +103,6 @@ func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error {
    if err != nil {
        return err
    }
    defer r.Close()

    if !r.Next() {
        return nil

@@ -130,10 +130,10 @@ func TestStop(t *testing.T) {
func getTestBlockHeader() types.EventDataNewBlockHeader {
    return types.EventDataNewBlockHeader{
        Header: types.Header{Height: 1},
        ResultBeginBlock: abci.ResponseBeginBlock{
        ResultFinalizeBlock: abci.ResponseFinalizeBlock{
            Events: []abci.Event{
                {
                    Type: "begin_event",
                    Type: "finalize_event",
                    Attributes: []abci.EventAttribute{
                        {
                            Key: "proposer",
@@ -144,20 +144,6 @@ func getTestBlockHeader() types.EventDataNewBlockHeader {
                        },
                    },
                },
                ResultEndBlock: abci.ResponseEndBlock{
                    Events: []abci.Event{
                        {
                            Type: "end_event",
                            Attributes: []abci.EventAttribute{
                                {
                                    Key:   "foo",
                                    Value: "100",
                                    Index: true,
                                },
                            },
                        },
                    },
                },
            },
        },
    }
}

@@ -293,7 +279,6 @@ func verifyBlock(h int64) (bool, error) {
    if err != nil {
        return false, err
    }
    defer rows.Close()

    if !rows.Next() {
        return false, nil
@@ -309,7 +294,6 @@ func verifyBlock(h int64) (bool, error) {
    if err != nil {
        return false, err
    }
    defer rows.Close()

    return rows.Next(), nil
}

@@ -25,7 +25,9 @@ CREATE TABLE tx_events (
  created_at TIMESTAMPTZ NOT NULL,
  chain_id VARCHAR NOT NULL,
  UNIQUE (hash, key),
  FOREIGN KEY (tx_result_id) REFERENCES tx_results(id) ON DELETE CASCADE
  FOREIGN KEY (tx_result_id)
      REFERENCES tx_results(id)
      ON DELETE CASCADE
);
CREATE INDEX idx_block_events_key_value ON block_events(key, value);
CREATE INDEX idx_tx_events_key_value ON tx_events(key, value);

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -1,4 +1,4 @@
// Code generated by mockery v0.0.0-dev. DO NOT EDIT.
// Code generated by mockery 2.9.0. DO NOT EDIT.

package mocks

@@ -95,48 +95,6 @@ func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) {
    return r0, r1
}

// LoadFromDBOrGenesisDoc provides a mock function with given fields: _a0
func (_m *Store) LoadFromDBOrGenesisDoc(_a0 *types.GenesisDoc) (state.State, error) {
    ret := _m.Called(_a0)

    var r0 state.State
    if rf, ok := ret.Get(0).(func(*types.GenesisDoc) state.State); ok {
        r0 = rf(_a0)
    } else {
        r0 = ret.Get(0).(state.State)
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(*types.GenesisDoc) error); ok {
        r1 = rf(_a0)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// LoadFromDBOrGenesisFile provides a mock function with given fields: _a0
func (_m *Store) LoadFromDBOrGenesisFile(_a0 string) (state.State, error) {
    ret := _m.Called(_a0)

    var r0 state.State
    if rf, ok := ret.Get(0).(func(string) state.State); ok {
        r0 = rf(_a0)
    } else {
        r0 = ret.Get(0).(state.State)
    }

    var r1 error
    if rf, ok := ret.Get(1).(func(string) error); ok {
        r1 = rf(_a0)
    } else {
        r1 = ret.Error(1)
    }

    return r0, r1
}

// LoadValidators provides a mock function with given fields: _a0
func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) {
    ret := _m.Called(_a0)

@@ -110,13 +110,13 @@ func TestABCIResponsesSaveLoad1(t *testing.T) {
|
||||
|
||||
abciResponses := new(tmstate.ABCIResponses)
|
||||
dtxs := make([]*abci.ResponseDeliverTx, 2)
|
||||
abciResponses.DeliverTxs = dtxs
|
||||
abciResponses.FinalizeBlock.Txs = dtxs
|
||||
|
||||
abciResponses.DeliverTxs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil}
|
||||
abciResponses.DeliverTxs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil}
|
||||
abciResponses.FinalizeBlock.Txs[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Events: nil}
|
||||
abciResponses.FinalizeBlock.Txs[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Events: nil}
|
||||
pbpk, err := cryptoenc.PubKeyToProto(ed25519.GenPrivKey().PubKey())
|
||||
require.NoError(t, err)
|
||||
abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}}
|
||||
abciResponses.FinalizeBlock = &abci.ResponseFinalizeBlock{ValidatorUpdates: []abci.ValidatorUpdate{{PubKey: pbpk, Power: 10}}}
|
||||
|
||||
err = stateStore.SaveABCIResponses(block.Height, abciResponses)
|
||||
require.NoError(t, err)
|
||||
@@ -191,9 +191,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
|
||||
for i, tc := range cases {
|
||||
h := int64(i + 1) // last block height, one below what we save
|
||||
responses := &tmstate.ABCIResponses{
|
||||
BeginBlock: &abci.ResponseBeginBlock{},
|
||||
DeliverTxs: tc.added,
|
||||
EndBlock: &abci.ResponseEndBlock{},
|
||||
FinalizeBlock: &abci.ResponseFinalizeBlock{
|
||||
Txs: tc.added,
|
||||
},
|
||||
}
|
||||
err := stateStore.SaveABCIResponses(h, responses)
|
||||
require.NoError(t, err)
|
||||
@@ -206,9 +206,9 @@ func TestABCIResponsesSaveLoad2(t *testing.T) {
|
||||
if assert.NoError(err, "%d", i) {
|
||||
t.Log(res)
|
||||
responses := &tmstate.ABCIResponses{
|
||||
BeginBlock: &abci.ResponseBeginBlock{},
|
||||
DeliverTxs: tc.expected,
|
||||
EndBlock: &abci.ResponseEndBlock{},
|
||||
FinalizeBlock: &abci.ResponseFinalizeBlock{
|
||||
Txs: tc.added,
|
||||
},
|
||||
}
|
||||
assert.Equal(sm.ABCIResponsesResultsHash(responses), sm.ABCIResponsesResultsHash(res), "%d", i)
|
||||
}
|
||||
@@ -275,7 +275,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) {
|
||||
power++
|
||||
}
|
||||
header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power)
|
||||
validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
|
||||
validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates)
|
||||
require.NoError(t, err)
|
||||
state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)
|
||||
require.NoError(t, err)
|
||||
@@ -451,10 +451,11 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) {
     block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit))
     blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
     abciResponses := &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: nil,
+        },
     }
-    validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+    validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
     require.NoError(t, err)
     updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
     assert.NoError(t, err)

@@ -566,10 +567,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
     blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
     // no updates:
     abciResponses := &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: nil,
+        },
     }
-    validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+    validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
     require.NoError(t, err)

     updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)

@@ -629,7 +631,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
         updatedVal2,
     )

-    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
     require.NoError(t, err)

     updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates)

@@ -669,10 +671,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
     // -> proposers should alternate:
     oldState := updatedState3
     abciResponses = &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: nil,
+        },
     }
-    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
     require.NoError(t, err)

     oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)

@@ -685,10 +688,11 @@ func TestProposerPriorityProposerAlternates(t *testing.T) {
     for i := 0; i < 1000; i++ {
         // no validator updates:
         abciResponses := &tmstate.ABCIResponses{
-            BeginBlock: &abci.ResponseBeginBlock{},
-            EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+            FinalizeBlock: &abci.ResponseFinalizeBlock{
+                ValidatorUpdates: nil,
+            },
         }
-        validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+        validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
         require.NoError(t, err)

         updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates)

@@ -743,10 +747,11 @@ func TestLargeGenesisValidator(t *testing.T) {
     for i := 0; i < 10; i++ {
         // no updates:
         abciResponses := &tmstate.ABCIResponses{
-            BeginBlock: &abci.ResponseBeginBlock{},
-            EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+            FinalizeBlock: &abci.ResponseFinalizeBlock{
+                ValidatorUpdates: nil,
+            },
         }
-        validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+        validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
         require.NoError(t, err)

         block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))

@@ -775,8 +780,9 @@ func TestLargeGenesisValidator(t *testing.T) {
     validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal})
     assert.NoError(t, err)
     abciResponses := &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal},
+        },
     }
     block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
     blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

@@ -787,10 +793,11 @@ func TestLargeGenesisValidator(t *testing.T) {
     for i := 0; i < 200; i++ {
         // no updates:
         abciResponses := &tmstate.ABCIResponses{
-            BeginBlock: &abci.ResponseBeginBlock{},
-            EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+            FinalizeBlock: &abci.ResponseFinalizeBlock{
+                ValidatorUpdates: nil,
+            },
         }
-        validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+        validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
         require.NoError(t, err)

         block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit))

@@ -823,8 +830,9 @@ func TestLargeGenesisValidator(t *testing.T) {
     assert.NoError(t, err)

     abciResponses := &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: []abci.ValidatorUpdate{addedVal},
+        },
     }
     block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
     blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

@@ -838,12 +846,13 @@ func TestLargeGenesisValidator(t *testing.T) {
     require.NoError(t, err)
     removeGenesisVal := abci.ValidatorUpdate{PubKey: gp, Power: 0}
     abciResponses = &tmstate.ABCIResponses{
-        BeginBlock: &abci.ResponseBeginBlock{},
-        EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}},
+        FinalizeBlock: &abci.ResponseFinalizeBlock{
+            ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal},
+        },
     }
     block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit))
     blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}
-    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+    validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
     require.NoError(t, err)
     updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates)
     require.NoError(t, err)

@@ -857,10 +866,11 @@ func TestLargeGenesisValidator(t *testing.T) {
    isProposerUnchanged := true
    for isProposerUnchanged {
        abciResponses := &tmstate.ABCIResponses{
-           BeginBlock: &abci.ResponseBeginBlock{},
-           EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+           FinalizeBlock: &abci.ResponseFinalizeBlock{
+               ValidatorUpdates: nil,
+           },
        }
-       validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+       validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
        require.NoError(t, err)
        block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit))
        blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()}

@@ -881,10 +891,11 @@ func TestLargeGenesisValidator(t *testing.T) {
    for i := 0; i < 100; i++ {
        // no updates:
        abciResponses := &tmstate.ABCIResponses{
-           BeginBlock: &abci.ResponseBeginBlock{},
-           EndBlock:   &abci.ResponseEndBlock{ValidatorUpdates: nil},
+           FinalizeBlock: &abci.ResponseFinalizeBlock{
+               ValidatorUpdates: nil,
+           },
        }
-       validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates)
+       validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.FinalizeBlock.ValidatorUpdates)
        require.NoError(t, err)

        block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit))

@@ -947,7 +958,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) {

    // Save state etc.
    var validatorUpdates []*types.Validator
-   validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
+   validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates)
    require.NoError(t, err)
    state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)
    require.Nil(t, err)

@@ -1024,7 +1035,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) {
            cp = params[changeIndex]
        }
        header, blockID, responses := makeHeaderPartsResponsesParams(state, &cp)
-       validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates)
+       validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.FinalizeBlock.ValidatorUpdates)
        require.NoError(t, err)
        state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates)

@@ -401,7 +401,7 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b
 //
 // See merkle.SimpleHashFromByteSlices
 func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte {
-    return types.NewResults(ar.DeliverTxs).Hash()
+    return types.NewResults(ar.FinalizeBlock.Txs).Hash()
 }

 // LoadABCIResponses loads the ABCIResponses for the given height from the

@@ -445,13 +445,13 @@ func (store dbStore) SaveABCIResponses(height int64, abciResponses *tmstate.ABCI
 func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCIResponses) error {
     var dtxs []*abci.ResponseDeliverTx
     // strip nil values,
-    for _, tx := range abciResponses.DeliverTxs {
+    for _, tx := range abciResponses.FinalizeBlock.Txs {
         if tx != nil {
             dtxs = append(dtxs, tx)
         }
     }

-    abciResponses.DeliverTxs = dtxs
+    abciResponses.FinalizeBlock.Txs = dtxs

     bz, err := abciResponses.Marshal()
     if err != nil {

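The nil-stripping in saveABCIResponses keeps its behavior; only the field it reads changes. A standalone sketch of the semantics, using an illustrative result type rather than the repo's:

package main

import "fmt"

type result struct{ data []byte }

// stripNil mirrors the loop above: nil per-tx results are dropped before
// the responses are marshaled, so {r1, nil, r2} is persisted as {r1, r2}.
func stripNil(in []*result) []*result {
	var out []*result
	for _, tx := range in {
		if tx != nil {
			out = append(out, tx)
		}
	}
	return out
}

func main() {
	txs := []*result{{data: []byte{1}}, nil, {data: []byte{2}}}
	fmt.Println(len(stripNil(txs))) // 2
}
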
@@ -225,10 +225,12 @@ func TestPruneStates(t *testing.T) {
        require.NoError(t, err)

        err = stateStore.SaveABCIResponses(h, &tmstate.ABCIResponses{
-           DeliverTxs: []*abci.ResponseDeliverTx{
-               {Data: []byte{1}},
-               {Data: []byte{2}},
-               {Data: []byte{3}},
+           FinalizeBlock: &abci.ResponseFinalizeBlock{
+               Txs: []*abci.ResponseDeliverTx{
+                   {Data: []byte{1}},
+                   {Data: []byte{2}},
+                   {Data: []byte{3}},
+               },
            },
        })
        require.NoError(t, err)

@@ -287,17 +289,17 @@

 func TestABCIResponsesResultsHash(t *testing.T) {
    responses := &tmstate.ABCIResponses{
-       BeginBlock: &abci.ResponseBeginBlock{},
-       DeliverTxs: []*abci.ResponseDeliverTx{
-           {Code: 32, Data: []byte("Hello"), Log: "Huh?"},
+       FinalizeBlock: &abci.ResponseFinalizeBlock{
+           Txs: []*abci.ResponseDeliverTx{
+               {Code: 32, Data: []byte("Hello"), Log: "Huh?"},
+           },
        },
-       EndBlock: &abci.ResponseEndBlock{},
    }

    root := sm.ABCIResponsesResultsHash(responses)

    // root should be Merkle tree root of DeliverTxs responses
-   results := types.NewResults(responses.DeliverTxs)
+   results := types.NewResults(responses.FinalizeBlock.Txs)
    assert.Equal(t, root, results.Hash())

    // test we can prove first DeliverTx

@@ -14,9 +14,9 @@ var (
    // testnetCombinations defines global testnet options, where we generate a
    // separate testnet for each combination (Cartesian product) of options.
    testnetCombinations = map[string][]interface{}{
-       "topology":      {"quad", "large"},
-       "ipv6":          {false},
-       "p2p":           {NewP2PMode},
+       "topology":      {"single", "quad", "large"},
+       "ipv6":          {false, true},
+       "p2p":           {NewP2PMode, LegacyP2PMode, HybridP2PMode},
        "queueType":     {"priority"}, // "fifo", "wdrr"
        "initialHeight": {0, 1000},
        "initialState": {

@@ -24,13 +24,14 @@ var (
            map[string]string{"initial01": "a", "initial02": "b", "initial03": "c"},
        },
        "validators": {"genesis", "initchain"},
-       "keyType":    {types.ABCIPubKeyTypeEd25519},
+       "keyType":    {types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1},
    }

    // The following specify randomly chosen values for testnet nodes.
-   nodeDatabases        = uniformChoice{"badgerdb"}
-   nodeABCIProtocols    = uniformChoice{"builtin"}
-   nodePrivvalProtocols = uniformChoice{"file"}
+   nodeDatabases = uniformChoice{"goleveldb", "cleveldb", "rocksdb", "boltdb", "badgerdb"}
+   // FIXME: grpc disabled due to https://github.com/tendermint/tendermint/issues/5439
+   nodeABCIProtocols = uniformChoice{"unix", "tcp", "builtin"} // "grpc"
+   nodePrivvalProtocols = uniformChoice{"file", "unix", "tcp", "grpc"}
    // FIXME: v2 disabled due to flake
    nodeFastSyncs  = uniformChoice{"v0"} // "v2"
    nodeStateSyncs = uniformChoice{false, true}

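Since the generator emits one testnet per combination, widening these lists multiplies the matrix. A rough, self-contained count using the visible list sizes on the wider side of the compare (the remaining options multiply in on top; illustrative only):

package main

import "fmt"

func main() {
	// topology=3, ipv6=2, p2p=3, keyType=2, queueType=1, initialHeight=2,
	// taken from the option lists shown above.
	sizes := []int{3, 2, 3, 2, 1, 2}
	n := 1
	for _, s := range sizes {
		n *= s
	}
	fmt.Println(n) // 72 testnets before validators/initialState multiply in
}
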
@@ -44,7 +45,6 @@ var (
        "restart": 0.1,
    }
    evidence = uniformChoice{0, 1, 10}
-   txSize   = uniformChoice{1024, 10240} // either 1kb or 10kb
 )

 // Generate generates random testnets using the given RNG.

@@ -93,7 +93,6 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
        KeyType:   opt["keyType"].(string),
        Evidence:  evidence.Choose(r).(int),
        QueueType: opt["queueType"].(string),
-       TxSize:    int64(txSize.Choose(r).(int)),
    }

    var p2pNodeFactor int

@@ -129,6 +128,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
    // First we generate seed nodes, starting at the initial height.
    for i := 1; i <= numSeeds; i++ {
        node := generateNode(r, e2e.ModeSeed, 0, manifest.InitialHeight, false)
+       node.QueueType = manifest.QueueType

        if p2pNodeFactor == 0 {
            node.DisableLegacyP2P = manifest.DisableLegacyP2P

@@ -154,6 +154,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
        node := generateNode(
            r, e2e.ModeValidator, startAt, manifest.InitialHeight, i <= 2)

+       node.QueueType = manifest.QueueType
        if p2pNodeFactor == 0 {
            node.DisableLegacyP2P = manifest.DisableLegacyP2P
        } else if p2pNodeFactor%i == 0 {

@@ -189,7 +190,7 @@ func generateTestnet(r *rand.Rand, opt map[string]interface{}) (e2e.Manifest, er
            nextStartAt += 5
        }
        node := generateNode(r, e2e.ModeFull, startAt, manifest.InitialHeight, false)
-
+       node.QueueType = manifest.QueueType
        if p2pNodeFactor == 0 {
            node.DisableLegacyP2P = manifest.DisableLegacyP2P
        } else if p2pNodeFactor%i == 0 {

@@ -52,7 +52,8 @@ seeds = ["seed02"]
 [node.validator03]
 database = "badgerdb"
 seeds = ["seed01"]
-abci_protocol = "grpc"
+# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439
+#abci_protocol = "grpc"
 persist_interval = 3
 perturb = ["kill"]
 privval_protocol = "grpc"

@@ -69,7 +70,8 @@ database = "cleveldb"
 fast_sync = "v0"
 seeds = ["seed02"]
 start_at = 1005 # Becomes part of the validator set at 1010
-abci_protocol = "grpc"
+# FIXME: should be grpc, disabled due to https://github.com/tendermint/tendermint/issues/5439
+#abci_protocol = "grpc"
 perturb = ["kill", "pause", "disconnect", "restart"]
 privval_protocol = "tcp"

@@ -64,9 +64,6 @@ type Manifest struct {

    // QueueType describes the type of queue that the system uses internally
    QueueType string `toml:"queue_type"`
-
-   // Number of bytes per tx. Default is 1kb (1024)
-   TxSize int64
 }

 // ManifestNode represents a node in a testnet manifest.

@@ -146,6 +143,10 @@ type ManifestNode struct {

    // UseNewP2P enables use of the new p2p layer for this node.
    DisableLegacyP2P bool `toml:"disable_legacy_p2p"`
+
+   // QueueType describes the type of queue that the p2p layer
+   // uses internally.
+   QueueType string `toml:"queue_type"`
 }

 // Save saves the testnet manifest to a file.

@@ -66,7 +66,6 @@ type Testnet struct {
    KeyType  string
    Evidence int
    LogLevel string
-   TxSize   int64
 }

 // Node represents a Tendermint node in a testnet.

@@ -134,14 +133,10 @@ func LoadTestnet(file string) (*Testnet, error) {
        Evidence:      manifest.Evidence,
        KeyType:       "ed25519",
        LogLevel:      manifest.LogLevel,
-       TxSize:        manifest.TxSize,
    }
    if len(manifest.KeyType) != 0 {
        testnet.KeyType = manifest.KeyType
    }
-   if testnet.TxSize <= 0 {
-       testnet.TxSize = 1024
-   }
    if manifest.InitialHeight > 0 {
        testnet.InitialHeight = manifest.InitialHeight
    }

@@ -174,8 +169,8 @@ func LoadTestnet(file string) (*Testnet, error) {
            RetainBlocks:     nodeManifest.RetainBlocks,
            Perturbations:    []Perturbation{},
            LogLevel:         manifest.LogLevel,
-           DisableLegacyP2P: manifest.DisableLegacyP2P,
            QueueType:        manifest.QueueType,
+           DisableLegacyP2P: manifest.DisableLegacyP2P || nodeManifest.DisableLegacyP2P,
        }

        if node.StartAt == testnet.InitialHeight {

@@ -325,11 +320,6 @@ func (n Node) Validate(testnet Testnet) error {
    default:
        return fmt.Errorf("invalid fast sync setting %q", n.FastSync)
    }
-   switch n.QueueType {
-   case "", "priority", "wdrr", "fifo":
-   default:
-       return fmt.Errorf("unsupported p2p queue type: %s", n.QueueType)
-   }
    switch n.Database {
    case "goleveldb", "cleveldb", "boltdb", "rocksdb", "badgerdb":
    default:

@@ -33,14 +33,14 @@ func execVerbose(args ...string) error {
 // execCompose runs a Docker Compose command for a testnet.
 func execCompose(dir string, args ...string) error {
    return exec(append(
-       []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
+       []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")},
        args...)...)
 }

 // execComposeVerbose runs a Docker Compose command for a testnet and displays its output.
 func execComposeVerbose(dir string, args ...string) error {
    return execVerbose(append(
-       []string{"docker-compose", "--ansi=never", "-f", filepath.Join(dir, "docker-compose.yml")},
+       []string{"docker-compose", "-f", filepath.Join(dir, "docker-compose.yml")},
        args...)...)
 }

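--ansi=never tells newer docker-compose releases to suppress ANSI color codes, keeping captured testnet logs free of escape sequences; one side of this compare simply predates its use here. A sketch of the two argv shapes (variable names illustrative):

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	composeFile := filepath.Join("testnet-dir", "docker-compose.yml")
	withFlag := []string{"docker-compose", "--ansi=never", "-f", composeFile} // color suppressed
	without := []string{"docker-compose", "-f", composeFile}                  // compose default
	fmt.Println(withFlag, without)
}
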
@@ -38,7 +38,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error {
    logger.Info(fmt.Sprintf("Starting transaction load (%v workers)...", concurrency))
    started := time.Now()

-   go loadGenerate(ctx, chTx, multiplier, testnet.TxSize)
+   go loadGenerate(ctx, chTx, multiplier)

    for w := 0; w < concurrency; w++ {
        go loadProcess(ctx, testnet, chTx, chSuccess)

@@ -66,13 +66,13 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error {
 }

 // loadGenerate generates jobs until the context is canceled
-func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, size int64) {
+func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int) {
    for i := 0; i < math.MaxInt64; i++ {
-       // We keep generating the same 100 keys over and over, with different values.
+       // We keep generating the same 1000 keys over and over, with different values.
        // This gives a reasonable load without putting too much data in the app.
-       id := i % 100
+       id := i % 1000

-       bz := make([]byte, size)
+       bz := make([]byte, 1024) // 1kb hex-encoded
        _, err := rand.Read(bz)
        if err != nil {
            panic(fmt.Sprintf("Failed to read random bytes: %v", err))

@@ -81,8 +81,7 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, siz

        select {
        case chTx <- tx:
-           sqrtSize := int(math.Sqrt(float64(size)))
-           time.Sleep(10 * time.Millisecond * time.Duration(sqrtSize/multiplier))
+           time.Sleep(time.Second / time.Duration(multiplier))

        case <-ctx.Done():
            close(chTx)

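The two pacing schemes differ by roughly a factor of three at the defaults: the size-aware sleep is 10ms times the integer square root of the tx size, divided by the multiplier (320ms per tx at size=1024, multiplier=1), while the fixed scheme sleeps a full second per tx at multiplier 1. A runnable comparison of the formulas exactly as written above:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	size, multiplier := int64(1024), 1

	// Size-aware scheme (one side of this compare).
	sqrtSize := int(math.Sqrt(float64(size)))
	sizeAware := 10 * time.Millisecond * time.Duration(sqrtSize/multiplier)

	// Fixed-rate scheme (the other side): multiplier txs per second.
	fixed := time.Second / time.Duration(multiplier)

	fmt.Println(sizeAware, fixed) // 320ms 1s
}
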
@@ -61,6 +61,9 @@ func NewCLI() *CLI {
            defer loadCancel()
            go func() {
                err := Load(ctx, cli.testnet, 1)
+               if err != nil {
+                   logger.Error(fmt.Sprintf("Transaction load failed: %v", err.Error()))
+               }
                chLoadResult <- err
            }()

@@ -92,9 +95,9 @@ func NewCLI() *CLI {

            loadCancel()
            if err := <-chLoadResult; err != nil {
-               return fmt.Errorf("transaction load failed: %w", err)
+               return err
            }
-           if err := Wait(cli.testnet, 5); err != nil { // wait for network to settle before tests
+           if err := Wait(cli.testnet, 8); err != nil { // wait for network to settle before tests
                return err
            }
            if err := Test(cli.testnet); err != nil {

@@ -235,7 +238,10 @@ func NewCLI() *CLI {
            Example: "runner logs validator03",
            Args:    cobra.MaximumNArgs(1),
            RunE: func(cmd *cobra.Command, args []string) error {
-               return execComposeVerbose(cli.testnet.Dir, append([]string{"logs", "--no-color"}, args...)...)
+               if len(args) == 1 {
+                   return execComposeVerbose(cli.testnet.Dir, "logs", args[0])
+               }
+               return execComposeVerbose(cli.testnet.Dir, "logs")
            },
        })

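Because the command declares cobra.MaximumNArgs(1), args holds at most one node name, so the two explicit branches cover every case. An equivalent single-path body, a sketch under that assumption using the helper as named in the diff:

// Equivalent RunE body, assuming len(args) <= 1 via cobra.MaximumNArgs(1).
composeArgs := append([]string{"logs"}, args...) // "logs" or "logs <node>"
return execComposeVerbose(cli.testnet.Dir, composeArgs...)
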
@@ -16,7 +16,7 @@ func Perturb(testnet *e2e.Testnet) error {
            if err != nil {
                return err
            }
-           time.Sleep(5 * time.Second) // give network some time to recover between each
+           time.Sleep(3 * time.Second) // give network some time to recover between each
        }
    }
    return nil

@@ -72,7 +72,7 @@ func PerturbNode(node *e2e.Node, perturbation e2e.Perturbation) (*rpctypes.Resul
        return nil, nil
    }

-   status, err := waitForNode(node, 0, 30*time.Second)
+   status, err := waitForNode(node, 0, 15*time.Second)
    if err != nil {
        return nil, err
    }

@@ -55,7 +55,7 @@ func waitForHeight(testnet *e2e.Testnet, height int64) (*types.Block, *types.Blo
        if len(clients) == 0 {
            return nil, nil, errors.New("unable to connect to any network nodes")
        }
-       if time.Since(lastIncrease) >= 30*time.Second {
+       if time.Since(lastIncrease) >= 20*time.Second {
            if maxResult == nil {
                return nil, nil, errors.New("chain stalled at unknown height")
            }

@@ -43,7 +43,7 @@ func Start(testnet *e2e.Testnet) error {
        if err := execCompose(testnet.Dir, "up", "-d", node.Name); err != nil {
            return err
        }
-       if _, err := waitForNode(node, 0, 30*time.Second); err != nil {
+       if _, err := waitForNode(node, 0, 15*time.Second); err != nil {
            return err
        }
        logger.Info(fmt.Sprintf("Node %v up on http://127.0.0.1:%v", node.Name, node.ProxyPort))

@@ -30,5 +30,5 @@ func WaitUntil(testnet *e2e.Testnet, height int64) error {
 // waitingTime estimates how long it should take for a node to reach the height.
 // More nodes in a network implies we may expect a slower network and may have to wait longer.
 func waitingTime(nodes int) time.Duration {
-   return time.Duration(30+(nodes*5)) * time.Second
+   return time.Duration(20+(nodes*4)) * time.Second
 }

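The two formulas diverge quickly with network size: for a four-node network, one side of the compare budgets 30+4*5 = 50s while the other budgets 20+4*4 = 36s. A runnable check of both:

package main

import (
	"fmt"
	"time"
)

func main() {
	nodes := 4
	longer := time.Duration(30+(nodes*5)) * time.Second  // 50s
	shorter := time.Duration(20+(nodes*4)) * time.Second // 36s
	fmt.Println(longer, shorter)
}
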
Some files were not shown because too many files have changed in this diff.