Mirror of https://github.com/tendermint/tendermint.git, synced 2026-01-28 23:52:04 +00:00

Compare commits (10 commits): wb/validat... → add_persis...
| Author | SHA1 | Date |
|---|---|---|
| | 519f82e2d3 | |
| | 8109c10d5e | |
| | 7b0e98d0de | |
| | 24dbcb392b | |
| | 2f90325fb7 | |
| | 7f304bc498 | |
| | 33a0a48dbe | |
| | 073b99704d | |
| | f14e81e21c | |
| | bcae7e228e | |
.github/workflows/build.yml (vendored), 24 changes
```diff
@@ -24,7 +24,7 @@ jobs:
         with:
           go-version: "1.17"
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
@@ -45,7 +45,7 @@ jobs:
         with:
           go-version: "1.17"
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
@@ -67,7 +67,7 @@ jobs:
         with:
           go-version: "1.17"
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
@@ -80,3 +80,21 @@ jobs:
       run: test/app/test.sh
       shell: bash
       if: "env.GIT_DIFF != ''"
+
+  test_persistence:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/setup-go@v2
+        with:
+          go-version: "1.17"
+      - uses: actions/checkout@v2.4.0
+      - uses: technote-space/get-diff-action@v5
+        with:
+          PATTERNS: |
+            **/**.go
+            go.mod
+            go.sum
+      - run: make install
+      - run: test/persist/test_failure_indices.sh
+        shell: bash
+      # if: "env.GIT_DIFF != ''"
```
.github/workflows/docker.yml (vendored), 2 changes
```diff
@@ -43,7 +43,7 @@ jobs:
 
       - name: Login to DockerHub
         if: ${{ github.event_name != 'pull_request' }}
-       uses: docker/login-action@v1.12.0
+       uses: docker/login-action@v1.10.0
         with:
           username: ${{ secrets.DOCKERHUB_USERNAME }}
           password: ${{ secrets.DOCKERHUB_TOKEN }}
```
.github/workflows/e2e.yml (vendored), 2 changes
```diff
@@ -18,7 +18,7 @@ jobs:
         with:
           go-version: '1.17'
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
```
.github/workflows/lint.yml (vendored), 2 changes
```diff
@@ -14,7 +14,7 @@ jobs:
     timeout-minutes: 8
     steps:
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
```
.github/workflows/linter.yml (vendored), 2 changes
```diff
@@ -21,7 +21,7 @@ jobs:
       - name: Checkout Code
         uses: actions/checkout@v2.4.0
       - name: Lint Code Base
-       uses: docker://github/super-linter:v4
+       uses: docker://github/super-linter:v3
         env:
           LINTER_RULES_PATH: .
           VALIDATE_ALL_CODEBASE: true
```
.github/workflows/tests.yml (vendored), 15 changes
```diff
@@ -14,13 +14,13 @@ jobs:
     strategy:
       fail-fast: false
       matrix:
-       part: ["00", "01", "02", "03", "04", "05"]
+       part: ["00", "01", "02", "03"]
     steps:
       - uses: actions/setup-go@v2
         with:
           go-version: "1.17"
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
@@ -30,7 +30,7 @@ jobs:
             Makefile
       - name: Run Go Tests
         run: |
-         make test-group-${{ matrix.part }} NUM_SPLIT=6
+         make test-group-${{ matrix.part }} NUM_SPLIT=4
         if: env.GIT_DIFF
       - uses: actions/upload-artifact@v2
         with:
@@ -42,7 +42,7 @@ jobs:
     needs: tests
     steps:
       - uses: actions/checkout@v2.4.0
-     - uses: technote-space/get-diff-action@v6.0.1
+     - uses: technote-space/get-diff-action@v5
         with:
           PATTERNS: |
             **/**.go
@@ -73,3 +73,10 @@ jobs:
         with:
           file: ./coverage.txt
         if: env.GIT_DIFF
+
+  test_persistence:
+    runs-on: ubuntu-latest
+    steps:
+      - name: run persistence tests
+        working-directory: test/persist
+        run: ./test_failure_indices.sh
```
CHANGELOG.md, 15 changes
```diff
@@ -174,21 +174,6 @@ Special thanks to external contributors on this release: @JayT106,
 - [cmd/tendermint/commands] [\#6623](https://github.com/tendermint/tendermint/pull/6623) replace `$HOME/.some/test/dir` with `t.TempDir` (@tanyabouman)
 - [statesync] \#6807 Implement P2P state provider as an alternative to RPC (@cmwaters)
 
-## v0.34.15
-
-Special thanks to external contributors on this release: @thanethomson
-
-### BUG FIXES
-
-- [\#7368](https://github.com/tendermint/tendermint/issues/7368) cmd: add integration test for rollback functionality (@cmwaters).
-- [\#7309](https://github.com/tendermint/tendermint/issues/7309) pubsub: Report a non-nil error when shutting down (fixes #7306).
-- [\#7057](https://github.com/tendermint/tendermint/pull/7057) Import Postgres driver support for the psql indexer (@creachadair).
-- [\#7106](https://github.com/tendermint/tendermint/pull/7106) Revert mutex change to ABCI Clients (@tychoish).
-
-### IMPROVEMENTS
-
-- [config] [\#7230](https://github.com/tendermint/tendermint/issues/7230) rpc: Add experimental config params to allow for subscription buffer size control (@thanethomson).
-
 ## v0.34.14
 
 This release backports the `rollback` feature to allow recovery in the event of an incorrect app hash.
 
@@ -12,8 +12,7 @@ Special thanks to external contributors on this release:
 
 - CLI/RPC/Config
 
-  - [rpc] \#7575 Rework how RPC responses are written back via HTTP. (@creachadair)
-  - [rpc] \#7121 Remove the deprecated gRPC interface to the RPC service. (@creachadair)
+  - [rpc] Remove the deprecated gRPC interface to the RPC service. (@creachadair)
   - [blocksync] \#7159 Remove support for disabling blocksync in any circumstance. (@tychoish)
   - [mempool] \#7171 Remove legacy mempool implementation. (@tychoish)
 
@@ -25,14 +24,9 @@ Special thanks to external contributors on this release:
 
  - [p2p] \#7035 Remove legacy P2P routing implementation and associated configuration options. (@tychoish)
  - [p2p] \#7265 Peer manager reduces peer score for each failed dial attempt for peers that have not successfully dialed. (@tychoish)
  - [p2p] [\#7594](https://github.com/tendermint/tendermint/pull/7594) always advertise self, to enable mutual address discovery. (@altergui)
 
 - Go API
 
  - [rpc] \#7474 Remove the "URI" RPC client. (@creachadair)
  - [libs/pubsub] \#7451 Internalize the pubsub packages. (@creachadair)
  - [libs/sync] \#7450 Internalize and remove the library. (@creachadair)
  - [libs/async] \#7449 Move library to internal. (@creachadair)
  - [pubsub] \#7231 Remove unbuffered subscriptions and rework the Subscription interface. (@creachadair)
  - [eventbus] \#7231 Move the EventBus type to the internal/eventbus package. (@creachadair)
  - [blocksync] \#7046 Remove v2 implementation of the blocksync service and reactor, which was disabled in the previous release. (@tychoish)
@@ -48,19 +42,11 @@ Special thanks to external contributors on this release:
 - [rpc] [\#7270](https://github.com/tendermint/tendermint/pull/7270) Add `header` and `header_by_hash` RPC Client queries. (@fedekunze)
 - [cli] [#7033](https://github.com/tendermint/tendermint/pull/7033) Add a `rollback` command to rollback to the previous tendermint state in the event of non-deterministic app hash or reverting an upgrade.
 - [mempool, rpc] \#7041 Add removeTx operation to the RPC layer. (@tychoish)
 - [consensus] \#7354 add a new `synchrony` field to the `ConsensusParameter` struct for controlling the parameters of the proposer-based timestamp algorithm. (@williambanfield)
 - [consensus] \#7376 Update the proposal logic per the Propose-based timestamps specification so that the proposer will wait for the previous block time to occur before proposing the next block. (@williambanfield)
 - [consensus] \#7391 Use the proposed block timestamp as the proposal timestamp. Update the block validation logic to ensure that the proposed block's timestamp matches the timestamp in the proposal message. (@williambanfield)
 - [consensus] \#7415 Update proposal validation logic to Prevote nil if a proposal does not meet the conditions for Timeliness per the proposer-based timestamp specification. (@anca)
 - [consensus] \#7382 Update block validation to no longer require the block timestamp to be the median of the timestamps of the previous commit. (@anca)
 
 ### IMPROVEMENTS
 - [internal/protoio] \#7325 Optimized `MarshalDelimited` by inlining the common case and using a `sync.Pool` in the worst case. (@odeke-em)
 - [consensus] \#6969 remove logic to 'unlock' a locked block.
 
 - [pubsub] \#7319 Performance improvements for the event query API (@creachadair)
 - [node] \#7521 Define concrete type for seed node implementation (@spacech1mp)
 - [rpc] \#7612 paginate mempool /unconfirmed_txs rpc endpoint (@spacech1mp)
 - [light] [\#7536](https://github.com/tendermint/tendermint/pull/7536) rpc /status call returns info about the light client (@jmalicevic)
 
 ### BUG FIXES
 
```
Dockerfile

```diff
@@ -1,5 +1,5 @@
 # stage 1 Generate Tendermint Binary
-FROM golang:1.17-alpine as builder
+FROM golang:1.16-alpine as builder
 RUN apk update && \
     apk upgrade && \
     apk --no-cache add make
@@ -8,7 +8,7 @@ WORKDIR /tendermint
 RUN make build-linux
 
 # stage 2
-FROM golang:1.17-alpine
+FROM golang:1.15-alpine
 LABEL maintainer="hello@tendermint.com"
 
 # Tendermint will be looking for the genesis file in /tendermint/config/genesis.json
```
Makefile, 6 changes
```diff
@@ -299,11 +299,9 @@ NUM_SPLIT ?= 4
 $(BUILDDIR):
 	mkdir -p $@
 
-# The format statement filters out all packages that don't have tests.
-# Note we need to check for both in-package tests (.TestGoFiles) and
-# out-of-package tests (.XTestGoFiles).
+# the format statement filters out all packages that don't have tests.
 $(BUILDDIR)/packages.txt:$(GO_TEST_FILES) $(BUILDDIR)
-	go list -f "{{ if (or .TestGoFiles .XTestGoFiles) }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
+	go list -f "{{ if .TestGoFiles }}{{ .ImportPath }}{{ end }}" ./... | sort > $@
 
 split-test-packages:$(BUILDDIR)/packages.txt
 	split -d -n l/$(NUM_SPLIT) $< $<.
```
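The two templates differ in which test files they notice: `go list` reports in-package test files in `.TestGoFiles` and external-package test files in `.XTestGoFiles`, so a package whose tests all live in an external `_test` package is silently dropped from `packages.txt` by the `.TestGoFiles`-only template. A minimal illustration with a hypothetical file and package name:

```go
// kvstore_test.go is a hypothetical "out-of-package" test file. Because its
// package is kvstore_test rather than kvstore, go list reports it under
// .XTestGoFiles, not .TestGoFiles; a package containing only files like this
// would be skipped by the `{{ if .TestGoFiles }}` template and its tests
// would never be assigned to any test group.
package kvstore_test

import "testing"

func TestThroughExportedAPIOnly(t *testing.T) {
	// exercises the package purely through its exported identifiers
}
```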
abciclient: Client interface

```diff
@@ -33,24 +33,35 @@ type Client interface {
 
+	// Asynchronous requests
+	FlushAsync(context.Context) (*ReqRes, error)
+	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
+	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
+	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
+	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
+	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
+	CommitAsync(context.Context) (*ReqRes, error)
+	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
+	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
+	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
+	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
+	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
+	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
+	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
+
 	// Synchronous requests
-	Flush(context.Context) error
-	Echo(ctx context.Context, msg string) (*types.ResponseEcho, error)
-	Info(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
-	DeliverTx(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
-	CheckTx(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
-	Query(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
-	Commit(context.Context) (*types.ResponseCommit, error)
-	InitChain(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
-	BeginBlock(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
-	EndBlock(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
-	ListSnapshots(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
-	OfferSnapshot(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
-	LoadSnapshotChunk(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
-	ApplySnapshotChunk(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
+	FlushSync(context.Context) error
+	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
+	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
+	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
+	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
+	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
+	CommitSync(context.Context) (*types.ResponseCommit, error)
+	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
+	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
+	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
+	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
+	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
+	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
+	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
 }
 
 //----------------------------------------
```
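The changed region of the interface is a pure rename: each synchronous method gains or loses the `Sync` suffix while keeping its request and response types. A minimal caller sketch against the Sync-suffixed side (a hypothetical helper, not code from the diff; on the other side the call is spelled `client.CheckTx(ctx, req)` with identical semantics):

```go
package main

import (
	"context"
	"fmt"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
)

// checkTx shows the call shape against the Sync-suffixed interface above.
func checkTx(ctx context.Context, client abciclient.Client, tx []byte) error {
	res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: tx})
	if err != nil {
		return err // client or transport failure
	}
	if res.Code != 0 {
		return fmt.Errorf("tx rejected with code %d", res.Code)
	}
	return nil
}
```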
abciclient: local client creator

```diff
@@ -16,8 +16,8 @@ type Creator func(log.Logger) (Client, error)
 func NewLocalCreator(app types.Application) Creator {
 	mtx := new(sync.Mutex)
 
-	return func(logger log.Logger) (Client, error) {
-		return NewLocalClient(logger, mtx, app), nil
+	return func(_ log.Logger) (Client, error) {
+		return NewLocalClient(mtx, app), nil
 	}
 }
```
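Worth noting about `NewLocalCreator`: the mutex is allocated once and captured by the returned closure, so every client produced by the same Creator serializes its calls into the application. A hypothetical wiring sketch (function name and shape are illustrative, not from the diff):

```go
package main

import (
	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/types"
	"github.com/tendermint/tendermint/libs/log"
)

// newClients builds two clients from one Creator; both share the Creator's
// single mutex, so their ABCI calls into app never run concurrently.
func newClients(app types.Application, logger log.Logger) (abciclient.Client, abciclient.Client, error) {
	creator := abciclient.NewLocalCreator(app)

	c1, err := creator(logger) // one side of the diff forwards the logger to NewLocalClient
	if err != nil {
		return nil, nil, err
	}
	c2, err := creator(logger) // the other side ignores the logger argument
	if err != nil {
		return nil, nil, err
	}
	return c1, c2, nil
}
```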
abciclient: gRPC client

```diff
@@ -2,14 +2,12 @@ package abciclient
 
 import (
 	"context"
-	"errors"
+	"fmt"
 	"net"
 	"sync"
 	"time"
 
 	"google.golang.org/grpc"
-	"google.golang.org/grpc/credentials/insecure"
 
 	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/log"
@@ -107,10 +105,7 @@ func (cli *grpcClient) OnStart(ctx context.Context) error {
 
 RETRY_LOOP:
 	for {
-		conn, err := grpc.Dial(cli.addr,
-			grpc.WithTransportCredentials(insecure.NewCredentials()),
-			grpc.WithContextDialer(dialerFunc),
-		)
+		conn, err := grpc.Dial(cli.addr, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
 		if err != nil {
 			if cli.mustConnect {
 				return err
@@ -126,14 +121,10 @@ RETRY_LOOP:
 
 ENSURE_CONNECTED:
 	for {
-		_, err := client.Echo(ctx, &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
+		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
 		if err == nil {
 			break ENSURE_CONNECTED
 		}
-		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
-			return err
-		}
 
 		cli.logger.Error("Echo failed", "err", err)
 		time.Sleep(time.Second * echoRetryIntervalSeconds)
 	}
@@ -161,9 +152,9 @@ func (cli *grpcClient) StopForError(err error) {
 	}
 	cli.mtx.Unlock()
 
-	cli.logger.Error("Stopping abci.grpcClient for error", "err", err)
+	cli.logger.Error(fmt.Sprintf("Stopping abci.grpcClient for error: %v", err.Error()))
 	if err := cli.Stop(); err != nil {
-		cli.logger.Error("error stopping abci.grpcClient", "err", err)
+		cli.logger.Error("Error stopping abci.grpcClient", "err", err)
 	}
 }
@@ -183,6 +174,16 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
 
 //----------------------------------------
 
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
+	req := types.ToRequestEcho(msg)
+	res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
+}
+
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
 	req := types.ToRequestFlush()
@@ -193,6 +194,16 @@ func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
 }
 
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
+	req := types.ToRequestInfo(params)
+	res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
+}
+
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
 	req := types.ToRequestDeliverTx(params)
@@ -213,6 +224,106 @@ func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestChe
 	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
 }
 
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
+	req := types.ToRequestQuery(params)
+	res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
+	req := types.ToRequestCommit()
+	res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
+	req := types.ToRequestInitChain(params)
+	res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
+	req := types.ToRequestBeginBlock(params)
+	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
+	req := types.ToRequestEndBlock(params)
+	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
+	req := types.ToRequestListSnapshots(params)
+	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
+	req := types.ToRequestOfferSnapshot(params)
+	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) LoadSnapshotChunkAsync(
+	ctx context.Context,
+	params types.RequestLoadSnapshotChunk,
+) (*ReqRes, error) {
+	req := types.ToRequestLoadSnapshotChunk(params)
+	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
+}
+
+// NOTE: call is synchronous, use ctx to break early if needed
+func (cli *grpcClient) ApplySnapshotChunkAsync(
+	ctx context.Context,
+	params types.RequestApplySnapshotChunk,
+) (*ReqRes, error) {
+	req := types.ToRequestApplySnapshotChunk(params)
+	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishAsyncCall(
+		ctx,
+		req,
+		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
+	)
+}
 
 // finishAsyncCall creates a ReqRes for an async call, and immediately populates it
 // with the response. We don't complete it until it's been ordered via the channel.
 func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
@@ -256,22 +367,30 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
 
 //----------------------------------------
 
-func (cli *grpcClient) Flush(ctx context.Context) error { return nil }
+func (cli *grpcClient) FlushSync(ctx context.Context) error {
+	return nil
+}
 
-func (cli *grpcClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
-	req := types.ToRequestEcho(msg)
-	return cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
-}
+func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+	reqres, err := cli.EchoAsync(ctx, msg)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetEcho(), cli.Error()
+}
 
-func (cli *grpcClient) Info(
+func (cli *grpcClient) InfoSync(
 	ctx context.Context,
-	params types.RequestInfo,
+	req types.RequestInfo,
 ) (*types.ResponseInfo, error) {
-	req := types.ToRequestInfo(params)
-	return cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
+	reqres, err := cli.InfoAsync(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetInfo(), cli.Error()
 }
 
-func (cli *grpcClient) DeliverTx(
+func (cli *grpcClient) DeliverTxSync(
 	ctx context.Context,
 	params types.RequestDeliverTx,
 ) (*types.ResponseDeliverTx, error) {
@@ -283,7 +402,7 @@ func (cli *grpcClient) DeliverTx(
 	return cli.finishSyncCall(reqres).GetDeliverTx(), cli.Error()
 }
 
-func (cli *grpcClient) CheckTx(
+func (cli *grpcClient) CheckTxSync(
 	ctx context.Context,
 	params types.RequestCheckTx,
 ) (*types.ResponseCheckTx, error) {
@@ -295,76 +414,103 @@ func (cli *grpcClient) CheckTx(
 	return cli.finishSyncCall(reqres).GetCheckTx(), cli.Error()
 }
 
-func (cli *grpcClient) Query(
+func (cli *grpcClient) QuerySync(
 	ctx context.Context,
-	params types.RequestQuery,
+	req types.RequestQuery,
 ) (*types.ResponseQuery, error) {
-	req := types.ToRequestQuery(params)
-	return cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
+	reqres, err := cli.QueryAsync(ctx, req)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
 }
 
-func (cli *grpcClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
-	req := types.ToRequestCommit()
-	return cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
-}
+func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
+	reqres, err := cli.CommitAsync(ctx)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetCommit(), cli.Error()
+}
 
-func (cli *grpcClient) InitChain(
+func (cli *grpcClient) InitChainSync(
 	ctx context.Context,
 	params types.RequestInitChain,
 ) (*types.ResponseInitChain, error) {
 
-	req := types.ToRequestInitChain(params)
-	return cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
+	reqres, err := cli.InitChainAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetInitChain(), cli.Error()
 }
 
-func (cli *grpcClient) BeginBlock(
+func (cli *grpcClient) BeginBlockSync(
 	ctx context.Context,
 	params types.RequestBeginBlock,
 ) (*types.ResponseBeginBlock, error) {
 
-	req := types.ToRequestBeginBlock(params)
-	return cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
+	reqres, err := cli.BeginBlockAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetBeginBlock(), cli.Error()
 }
 
-func (cli *grpcClient) EndBlock(
+func (cli *grpcClient) EndBlockSync(
 	ctx context.Context,
 	params types.RequestEndBlock,
 ) (*types.ResponseEndBlock, error) {
 
-	req := types.ToRequestEndBlock(params)
-	return cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
+	reqres, err := cli.EndBlockAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetEndBlock(), cli.Error()
 }
 
-func (cli *grpcClient) ListSnapshots(
+func (cli *grpcClient) ListSnapshotsSync(
 	ctx context.Context,
 	params types.RequestListSnapshots,
 ) (*types.ResponseListSnapshots, error) {
 
-	req := types.ToRequestListSnapshots(params)
-	return cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
+	reqres, err := cli.ListSnapshotsAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetListSnapshots(), cli.Error()
 }
 
-func (cli *grpcClient) OfferSnapshot(
+func (cli *grpcClient) OfferSnapshotSync(
 	ctx context.Context,
 	params types.RequestOfferSnapshot,
 ) (*types.ResponseOfferSnapshot, error) {
 
-	req := types.ToRequestOfferSnapshot(params)
-	return cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
+	reqres, err := cli.OfferSnapshotAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetOfferSnapshot(), cli.Error()
 }
 
-func (cli *grpcClient) LoadSnapshotChunk(
+func (cli *grpcClient) LoadSnapshotChunkSync(
 	ctx context.Context,
 	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
 
-	req := types.ToRequestLoadSnapshotChunk(params)
-	return cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
+	reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetLoadSnapshotChunk(), cli.Error()
 }
 
-func (cli *grpcClient) ApplySnapshotChunk(
+func (cli *grpcClient) ApplySnapshotChunkSync(
 	ctx context.Context,
 	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
 
-	req := types.ToRequestApplySnapshotChunk(params)
-	return cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
+	reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
+	if err != nil {
+		return nil, err
+	}
+	return cli.finishSyncCall(reqres).GetApplySnapshotChunk(), cli.Error()
 }
```
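For context on the paired `grpc.Dial` lines above: `grpc.WithInsecure()` and `grpc.WithTransportCredentials(insecure.NewCredentials())` both produce a plaintext connection, but `WithInsecure` is deprecated in newer grpc-go releases in favor of the explicit credentials form. A minimal sketch of the two spellings (placeholder address, not the file's code):

```go
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

// dialPlaintext dials without transport security; addr is a placeholder.
func dialPlaintext(addr string) (*grpc.ClientConn, error) {
	// Deprecated spelling: grpc.Dial(addr, grpc.WithInsecure())
	// Current spelling:
	return grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
```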
abciclient: local client

```diff
@@ -5,7 +5,6 @@ import (
 	"sync"
 
 	types "github.com/tendermint/tendermint/abci/types"
-	"github.com/tendermint/tendermint/libs/log"
 	"github.com/tendermint/tendermint/libs/service"
 )
 
@@ -27,7 +26,7 @@ var _ Client = (*localClient)(nil)
 // methods of the given app.
 //
 // Both Async and Sync methods ignore the given context.Context parameter.
-func NewLocalClient(logger log.Logger, mtx *sync.Mutex, app types.Application) Client {
+func NewLocalClient(mtx *sync.Mutex, app types.Application) Client {
 	if mtx == nil {
 		mtx = new(sync.Mutex)
 	}
@@ -35,7 +34,7 @@ func NewLocalClient(logger log.Logger, mtx *sync.Mutex, app types.Application) C
 		mtx:         mtx,
 		Application: app,
 	}
-	cli.BaseService = *service.NewBaseService(logger, "localClient", cli)
+	cli.BaseService = *service.NewBaseService(nil, "localClient", cli)
 	return cli
 }
 
@@ -58,6 +57,27 @@ func (app *localClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
 	return newLocalReqRes(types.ToRequestFlush(), nil), nil
 }
 
+func (app *localClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	return app.callback(
+		types.ToRequestEcho(msg),
+		types.ToResponseEcho(msg),
+	), nil
+}
+
+func (app *localClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.Info(req)
+	return app.callback(
+		types.ToRequestInfo(req),
+		types.ToResponseInfo(res),
+	), nil
+}
+
 func (app *localClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
@@ -80,17 +100,122 @@ func (app *localClient) CheckTxAsync(ctx context.Context, req types.RequestCheck
 	), nil
 }
 
+func (app *localClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.Query(req)
+	return app.callback(
+		types.ToRequestQuery(req),
+		types.ToResponseQuery(res),
+	), nil
+}
+
+func (app *localClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.Commit()
+	return app.callback(
+		types.ToRequestCommit(),
+		types.ToResponseCommit(res),
+	), nil
+}
+
+func (app *localClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.InitChain(req)
+	return app.callback(
+		types.ToRequestInitChain(req),
+		types.ToResponseInitChain(res),
+	), nil
+}
+
+func (app *localClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.BeginBlock(req)
+	return app.callback(
+		types.ToRequestBeginBlock(req),
+		types.ToResponseBeginBlock(res),
+	), nil
+}
+
+func (app *localClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.EndBlock(req)
+	return app.callback(
+		types.ToRequestEndBlock(req),
+		types.ToResponseEndBlock(res),
+	), nil
+}
+
+func (app *localClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.ListSnapshots(req)
+	return app.callback(
+		types.ToRequestListSnapshots(req),
+		types.ToResponseListSnapshots(res),
+	), nil
+}
+
+func (app *localClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.OfferSnapshot(req)
+	return app.callback(
+		types.ToRequestOfferSnapshot(req),
+		types.ToResponseOfferSnapshot(res),
+	), nil
+}
+
+func (app *localClient) LoadSnapshotChunkAsync(
+	ctx context.Context,
+	req types.RequestLoadSnapshotChunk,
+) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.LoadSnapshotChunk(req)
+	return app.callback(
+		types.ToRequestLoadSnapshotChunk(req),
+		types.ToResponseLoadSnapshotChunk(res),
+	), nil
+}
+
+func (app *localClient) ApplySnapshotChunkAsync(
+	ctx context.Context,
+	req types.RequestApplySnapshotChunk,
+) (*ReqRes, error) {
+	app.mtx.Lock()
+	defer app.mtx.Unlock()
+
+	res := app.Application.ApplySnapshotChunk(req)
+	return app.callback(
+		types.ToRequestApplySnapshotChunk(req),
+		types.ToResponseApplySnapshotChunk(res),
+	), nil
+}
+
 //-------------------------------------------------------
 
-func (app *localClient) Flush(ctx context.Context) error {
+func (app *localClient) FlushSync(ctx context.Context) error {
 	return nil
 }
 
-func (app *localClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+func (app *localClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
 	return &types.ResponseEcho{Message: msg}, nil
 }
 
-func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
+func (app *localClient) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
 
@@ -98,7 +223,7 @@ func (app *localClient) Info(ctx context.Context, req types.RequestInfo) (*types
 	return &res, nil
 }
 
-func (app *localClient) DeliverTx(
+func (app *localClient) DeliverTxSync(
 	ctx context.Context,
 	req types.RequestDeliverTx,
 ) (*types.ResponseDeliverTx, error) {
@@ -110,7 +235,7 @@ func (app *localClient) DeliverTx(
 	return &res, nil
 }
 
-func (app *localClient) CheckTx(
+func (app *localClient) CheckTxSync(
 	ctx context.Context,
 	req types.RequestCheckTx,
 ) (*types.ResponseCheckTx, error) {
@@ -121,7 +246,7 @@ func (app *localClient) CheckTx(
 	return &res, nil
 }
 
-func (app *localClient) Query(
+func (app *localClient) QuerySync(
 	ctx context.Context,
 	req types.RequestQuery,
 ) (*types.ResponseQuery, error) {
@@ -132,7 +257,7 @@ func (app *localClient) Query(
 	return &res, nil
 }
 
-func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
+func (app *localClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
 	app.mtx.Lock()
 	defer app.mtx.Unlock()
 
@@ -140,7 +265,7 @@ func (app *localClient) Commit(ctx context.Context) (*types.ResponseCommit, erro
 	return &res, nil
 }
 
-func (app *localClient) InitChain(
+func (app *localClient) InitChainSync(
 	ctx context.Context,
 	req types.RequestInitChain,
 ) (*types.ResponseInitChain, error) {
@@ -152,7 +277,7 @@ func (app *localClient) InitChain(
 	return &res, nil
 }
 
-func (app *localClient) BeginBlock(
+func (app *localClient) BeginBlockSync(
 	ctx context.Context,
 	req types.RequestBeginBlock,
 ) (*types.ResponseBeginBlock, error) {
@@ -164,7 +289,7 @@ func (app *localClient) BeginBlock(
 	return &res, nil
 }
 
-func (app *localClient) EndBlock(
+func (app *localClient) EndBlockSync(
 	ctx context.Context,
 	req types.RequestEndBlock,
 ) (*types.ResponseEndBlock, error) {
@@ -176,7 +301,7 @@ func (app *localClient) EndBlock(
 	return &res, nil
 }
 
-func (app *localClient) ListSnapshots(
+func (app *localClient) ListSnapshotsSync(
 	ctx context.Context,
 	req types.RequestListSnapshots,
 ) (*types.ResponseListSnapshots, error) {
@@ -188,7 +313,7 @@ func (app *localClient) ListSnapshots(
 	return &res, nil
 }
 
-func (app *localClient) OfferSnapshot(
+func (app *localClient) OfferSnapshotSync(
 	ctx context.Context,
 	req types.RequestOfferSnapshot,
 ) (*types.ResponseOfferSnapshot, error) {
@@ -200,7 +325,7 @@ func (app *localClient) OfferSnapshot(
 	return &res, nil
 }
 
-func (app *localClient) LoadSnapshotChunk(
+func (app *localClient) LoadSnapshotChunkSync(
 	ctx context.Context,
 	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
 
@@ -211,7 +336,7 @@ func (app *localClient) LoadSnapshotChunk(
 	return &res, nil
 }
 
-func (app *localClient) ApplySnapshotChunk(
+func (app *localClient) ApplySnapshotChunkSync(
 	ctx context.Context,
 	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
```
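The doc comment above states that both Async and Sync methods ignore the context parameter, which the sketch below makes concrete: a canceled context does not fail a local call. This is a hedged illustration rather than code from the diff, using the in-repo kvstore example application as a stand-in and the two-argument `NewLocalClient` signature from the right-hand side:

```go
package main

import (
	"context"

	abciclient "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/types"
)

// demoIgnoredContext calls the local client with an already-canceled
// context; the call still runs because localClient never inspects ctx,
// it only takes the shared mutex and invokes the application in-process.
func demoIgnoredContext() error {
	client := abciclient.NewLocalClient(nil, kvstore.NewApplication())
	ctx, cancel := context.WithCancel(context.Background())
	cancel() // the context is canceled up front...

	_, err := client.InfoSync(ctx, types.RequestInfo{}) // ...yet this still executes
	return err
}
```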
abciclient: mocks

```diff
@@ -17,8 +17,31 @@ type Client struct {
 	mock.Mock
 }
 
-// ApplySnapshotChunk provides a mock function with given fields: _a0, _a1
-func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+// ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abciclient.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abciclient.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
 		}
 	}
 
 	var r1 error
 	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
 	}
 
 	return r0, r1
 }
 
+// ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseApplySnapshotChunk
@@ -40,16 +63,16 @@ func (_m *Client) ApplySnapshotChunk(_a0 context.Context, _a1 types.RequestApply
 	return r0, r1
 }
 
-// BeginBlock provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+// BeginBlockAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abciclient.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseBeginBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abciclient.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseBeginBlock)
+			r0 = ret.Get(0).(*abciclient.ReqRes)
 		}
 	}
 
@@ -63,21 +86,21 @@ func (_m *Client) BeginBlock(_a0 context.Context, _a1 types.RequestBeginBlock) (
 	return r0, r1
 }
 
-// CheckTx provides a mock function with given fields: _a0, _a1
-func (_m *Client) CheckTx(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+// BeginBlockSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseCheckTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
+	var r0 *types.ResponseBeginBlock
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseCheckTx)
+			r0 = ret.Get(0).(*types.ResponseBeginBlock)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -109,8 +132,54 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*
 	return r0, r1
 }
 
-// Commit provides a mock function with given fields: _a0
-func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
+// CheckTxSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseCheckTx
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseCheckTx)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// CommitAsync provides a mock function with given fields: _a0
+func (_m *Client) CommitAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context) *abciclient.ReqRes); ok {
+		r0 = rf(_a0)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+		r1 = rf(_a0)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// CommitSync provides a mock function with given fields: _a0
+func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
+	ret := _m.Called(_a0)
+
 	var r0 *types.ResponseCommit
@@ -132,29 +201,6 @@ func (_m *Client) Commit(_a0 context.Context) (*types.ResponseCommit, error) {
 	return r0, r1
 }
 
-// DeliverTx provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTx(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
-	ret := _m.Called(_a0, _a1)
-
-	var r0 *types.ResponseDeliverTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
-		r0 = rf(_a0, _a1)
-	} else {
-		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseDeliverTx)
-		}
-	}
-
-	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
-		r1 = rf(_a0, _a1)
-	} else {
-		r1 = ret.Error(1)
-	}
-
-	return r0, r1
-}
-
 // DeliverTxAsync provides a mock function with given fields: _a0, _a1
 func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abciclient.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
@@ -178,8 +224,54 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx
 	return r0, r1
 }
 
-// Echo provides a mock function with given fields: ctx, msg
-func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+// DeliverTxSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *types.ResponseDeliverTx
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*types.ResponseDeliverTx)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// EchoAsync provides a mock function with given fields: ctx, msg
+func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abciclient.ReqRes, error) {
+	ret := _m.Called(ctx, msg)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, string) *abciclient.ReqRes); ok {
+		r0 = rf(ctx, msg)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+		r1 = rf(ctx, msg)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// EchoSync provides a mock function with given fields: ctx, msg
+func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+	ret := _m.Called(ctx, msg)
+
 	var r0 *types.ResponseEcho
@@ -201,8 +293,31 @@ func (_m *Client) Echo(ctx context.Context, msg string) (*types.ResponseEcho, er
 	return r0, r1
 }
 
-// EndBlock provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlock(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+// EndBlockAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// EndBlockSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseEndBlock
@@ -238,20 +353,6 @@ func (_m *Client) Error() error {
 	return r0
 }
 
-// Flush provides a mock function with given fields: _a0
-func (_m *Client) Flush(_a0 context.Context) error {
-	ret := _m.Called(_a0)
-
-	var r0 error
-	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
-		r0 = rf(_a0)
-	} else {
-		r0 = ret.Error(0)
-	}
-
-	return r0
-}
-
 // FlushAsync provides a mock function with given fields: _a0
 func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
 	ret := _m.Called(_a0)
@@ -275,8 +376,45 @@ func (_m *Client) FlushAsync(_a0 context.Context) (*abciclient.ReqRes, error) {
 	return r0, r1
 }
 
-// Info provides a mock function with given fields: _a0, _a1
-func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
+// FlushSync provides a mock function with given fields: _a0
+func (_m *Client) FlushSync(_a0 context.Context) error {
+	ret := _m.Called(_a0)
+
+	var r0 error
+	if rf, ok := ret.Get(0).(func(context.Context) error); ok {
+		r0 = rf(_a0)
+	} else {
+		r0 = ret.Error(0)
+	}
+
+	return r0
+}
+
+// InfoAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// InfoSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseInfo
@@ -298,8 +436,31 @@ func (_m *Client) Info(_a0 context.Context, _a1 types.RequestInfo) (*types.Respo
 	return r0, r1
 }
 
-// InitChain provides a mock function with given fields: _a0, _a1
-func (_m *Client) InitChain(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
+// InitChainAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// InitChainSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseInitChain
@@ -335,8 +496,31 @@ func (_m *Client) IsRunning() bool {
 	return r0
 }
 
-// ListSnapshots provides a mock function with given fields: _a0, _a1
-func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+// ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// ListSnapshotsSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseListSnapshots
@@ -358,8 +542,31 @@ func (_m *Client) ListSnapshots(_a0 context.Context, _a1 types.RequestListSnapsh
 	return r0, r1
 }
 
-// LoadSnapshotChunk provides a mock function with given fields: _a0, _a1
-func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+// LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseLoadSnapshotChunk
@@ -381,8 +588,31 @@ func (_m *Client) LoadSnapshotChunk(_a0 context.Context, _a1 types.RequestLoadSn
 	return r0, r1
 }
 
-// OfferSnapshot provides a mock function with given fields: _a0, _a1
-func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+// OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// OfferSnapshotSync provides a mock function with given fields: _a0, _a1
+func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseOfferSnapshot
@@ -404,8 +634,31 @@ func (_m *Client) OfferSnapshot(_a0 context.Context, _a1 types.RequestOfferSnaps
 	return r0, r1
 }
 
-// Query provides a mock function with given fields: _a0, _a1
-func (_m *Client) Query(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
+// QueryAsync provides a mock function with given fields: _a0, _a1
+func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abciclient.ReqRes, error) {
+	ret := _m.Called(_a0, _a1)
+
+	var r0 *abciclient.ReqRes
+	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abciclient.ReqRes); ok {
+		r0 = rf(_a0, _a1)
+	} else {
+		if ret.Get(0) != nil {
+			r0 = ret.Get(0).(*abciclient.ReqRes)
+		}
+	}
+
+	var r1 error
+	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
+		r1 = rf(_a0, _a1)
+	} else {
+		r1 = ret.Error(1)
+	}
+
+	return r0, r1
+}
+
+// QuerySync provides a mock function with given fields: _a0, _a1
+func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
+	ret := _m.Called(_a0, _a1)
+
 	var r0 *types.ResponseQuery
```
|
||||
|
||||
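Note: the mock methods above are mockery-generated; each records its arguments via _m.Called and returns whatever the test programmed with On/Return. A minimal usage sketch (the mocks import path is assumed, not confirmed by this diff):

package example_test

import (
    "context"
    "testing"

    "github.com/stretchr/testify/mock"
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/abci/client/mocks" // assumed location of the generated mock
    "github.com/tendermint/tendermint/abci/types"
)

func TestInitChainSyncStub(t *testing.T) {
    m := new(mocks.Client)
    // Program a canned response; Called() in the generated method looks it up.
    m.On("InitChainSync", mock.Anything, mock.Anything).
        Return(&types.ResponseInitChain{}, nil)

    res, err := m.InitChainSync(context.Background(), types.RequestInitChain{})
    require.NoError(t, err)
    require.NotNil(t, res)
    m.AssertExpectations(t)
}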
@@ -222,10 +222,18 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {

//----------------------------------------

func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
}

func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestFlush())
}

func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
}

func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
}
@@ -234,9 +242,51 @@ func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestChec
    return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
}

func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
}

func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestCommit())
}

func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
}

func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
}

func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
}

func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
}

func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
}

func (cli *socketClient) LoadSnapshotChunkAsync(
    ctx context.Context,
    req types.RequestLoadSnapshotChunk,
) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
}

func (cli *socketClient) ApplySnapshotChunkAsync(
    ctx context.Context,
    req types.RequestApplySnapshotChunk,
) (*ReqRes, error) {
    return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
}

//----------------------------------------

func (cli *socketClient) Flush(ctx context.Context) error {
func (cli *socketClient) FlushSync(ctx context.Context) error {
    reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
    if err != nil {
        return queueErr(err)
@@ -261,143 +311,143 @@ func (cli *socketClient) Flush(ctx context.Context) error {
    }
}

func (cli *socketClient) Echo(ctx context.Context, msg string) (*types.ResponseEcho, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEcho(msg))
func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetEcho(), nil
}

func (cli *socketClient) Info(
func (cli *socketClient) InfoSync(
    ctx context.Context,
    req types.RequestInfo,
) (*types.ResponseInfo, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInfo(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetInfo(), nil
}

func (cli *socketClient) DeliverTx(
func (cli *socketClient) DeliverTxSync(
    ctx context.Context,
    req types.RequestDeliverTx,
) (*types.ResponseDeliverTx, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestDeliverTx(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetDeliverTx(), nil
}

func (cli *socketClient) CheckTx(
func (cli *socketClient) CheckTxSync(
    ctx context.Context,
    req types.RequestCheckTx,
) (*types.ResponseCheckTx, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCheckTx(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetCheckTx(), nil
}

func (cli *socketClient) Query(
func (cli *socketClient) QuerySync(
    ctx context.Context,
    req types.RequestQuery,
) (*types.ResponseQuery, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestQuery(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetQuery(), nil
}

func (cli *socketClient) Commit(ctx context.Context) (*types.ResponseCommit, error) {
    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestCommit())
func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetCommit(), nil
}

func (cli *socketClient) InitChain(
func (cli *socketClient) InitChainSync(
    ctx context.Context,
    req types.RequestInitChain,
) (*types.ResponseInitChain, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestInitChain(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetInitChain(), nil
}

func (cli *socketClient) BeginBlock(
func (cli *socketClient) BeginBlockSync(
    ctx context.Context,
    req types.RequestBeginBlock,
) (*types.ResponseBeginBlock, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestBeginBlock(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetBeginBlock(), nil
}

func (cli *socketClient) EndBlock(
func (cli *socketClient) EndBlockSync(
    ctx context.Context,
    req types.RequestEndBlock,
) (*types.ResponseEndBlock, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestEndBlock(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetEndBlock(), nil
}

func (cli *socketClient) ListSnapshots(
func (cli *socketClient) ListSnapshotsSync(
    ctx context.Context,
    req types.RequestListSnapshots,
) (*types.ResponseListSnapshots, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestListSnapshots(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetListSnapshots(), nil
}

func (cli *socketClient) OfferSnapshot(
func (cli *socketClient) OfferSnapshotSync(
    ctx context.Context,
    req types.RequestOfferSnapshot,
) (*types.ResponseOfferSnapshot, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestOfferSnapshot(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetOfferSnapshot(), nil
}

func (cli *socketClient) LoadSnapshotChunk(
func (cli *socketClient) LoadSnapshotChunkSync(
    ctx context.Context,
    req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestLoadSnapshotChunk(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
    if err != nil {
        return nil, err
    }
    return reqres.Response.GetLoadSnapshotChunk(), nil
}

func (cli *socketClient) ApplySnapshotChunk(
func (cli *socketClient) ApplySnapshotChunkSync(
    ctx context.Context,
    req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {

    reqres, err := cli.queueRequestAndFlush(ctx, types.ToRequestApplySnapshotChunk(req))
    reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
    if err != nil {
        return nil, err
    }
@@ -447,7 +497,7 @@ func (cli *socketClient) queueRequestAsync(
    return reqres, cli.Error()
}

func (cli *socketClient) queueRequestAndFlush(
func (cli *socketClient) queueRequestAndFlushSync(
    ctx context.Context,
    req *types.Request,
) (*ReqRes, error) {
@@ -457,7 +507,7 @@ func (cli *socketClient) queueRequestAndFlush(
        return nil, queueErr(err)
    }

    if err := cli.Flush(ctx); err != nil {
    if err := cli.FlushSync(ctx); err != nil {
        return nil, err
    }

@@ -542,6 +592,6 @@ func (cli *socketClient) stopForError(err error) {

    cli.logger.Info("Stopping abci.socketClient", "reason", err)
    if err := cli.Stop(); err != nil {
        cli.logger.Error("error stopping abci.socketClient", "err", err)
        cli.logger.Error("Error stopping abci.socketClient", "err", err)
    }
}
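Note: under the renamed API on the right-hand side of this diff, each *Sync method is its *Async counterpart plus a blocking flush. A minimal sketch of that equivalence, written as if inside the abciclient package (illustrative only):

// commitUnrolled performs what CommitSync does in one call: queue the
// request asynchronously, force a flush, then read the response.
func commitUnrolled(ctx context.Context, cli *socketClient) (*types.ResponseCommit, error) {
    reqres, err := cli.CommitAsync(ctx)
    if err != nil {
        return nil, err
    }
    if err := cli.FlushSync(ctx); err != nil {
        return nil, err
    }
    if err := cli.Error(); err != nil { // surface any transport error recorded by the client
        return nil, err
    }
    return reqres.Response.GetCommit(), nil
}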
@@ -23,20 +23,20 @@ func TestProperSyncCalls(t *testing.T) {
    defer cancel()

    app := slowApp{}
    logger := log.NewNopLogger()
    logger := log.TestingLogger()

    _, c := setupClientServer(ctx, t, logger, app)

    resp := make(chan error, 1)
    go func() {
        rsp, err := c.BeginBlock(ctx, types.RequestBeginBlock{})
        // This is BeginBlockSync unrolled....
        reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
        assert.NoError(t, err)
        assert.NoError(t, c.Flush(ctx))
        assert.NotNil(t, rsp)
        select {
        case <-ctx.Done():
        case resp <- c.Error():
        }
        err = c.FlushSync(ctx)
        assert.NoError(t, err)
        res := reqres.Response.GetBeginBlock()
        assert.NotNil(t, res)
        resp <- c.Error()
    }()

    select {

@@ -60,7 +60,7 @@ var RootCmd = &cobra.Command{
    }

    if logger == nil {
        logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
        logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
    }

    if client == nil {
@@ -442,7 +442,7 @@ func cmdEcho(cmd *cobra.Command, args []string) error {
    if len(args) > 0 {
        msg = args[0]
    }
    res, err := client.Echo(cmd.Context(), msg)
    res, err := client.EchoSync(cmd.Context(), msg)
    if err != nil {
        return err
    }
@@ -460,7 +460,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
    if len(args) == 1 {
        version = args[0]
    }
    res, err := client.Info(cmd.Context(), types.RequestInfo{Version: version})
    res, err := client.InfoSync(cmd.Context(), types.RequestInfo{Version: version})
    if err != nil {
        return err
    }
@@ -485,7 +485,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.DeliverTx(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
    res, err := client.DeliverTxSync(cmd.Context(), types.RequestDeliverTx{Tx: txBytes})
    if err != nil {
        return err
    }
@@ -511,7 +511,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
    if err != nil {
        return err
    }
    res, err := client.CheckTx(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
    res, err := client.CheckTxSync(cmd.Context(), types.RequestCheckTx{Tx: txBytes})
    if err != nil {
        return err
    }
@@ -526,7 +526,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {

// Get application Merkle root hash
func cmdCommit(cmd *cobra.Command, args []string) error {
    res, err := client.Commit(cmd.Context())
    res, err := client.CommitSync(cmd.Context())
    if err != nil {
        return err
    }
@@ -551,7 +551,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
        return err
    }

    resQuery, err := client.Query(cmd.Context(), types.RequestQuery{
    resQuery, err := client.QuerySync(cmd.Context(), types.RequestQuery{
        Data:   queryBytes,
        Path:   flagPath,
        Height: int64(flagHeight),
@@ -575,14 +575,15 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
}

func cmdKVStore(cmd *cobra.Command, args []string) error {
    logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
    logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

    // Create the application - in memory or persisted to disk
    var app types.Application
    if flagPersist == "" {
        app = kvstore.NewApplication()
    } else {
        app = kvstore.NewPersistentKVStoreApplication(logger, flagPersist)
        app = kvstore.NewPersistentKVStoreApplication(flagPersist)
        app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore"))
    }

    // Start the listener

@@ -13,7 +13,6 @@ import (
    "github.com/stretchr/testify/require"

    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"

    "github.com/tendermint/tendermint/libs/log"
    tmnet "github.com/tendermint/tendermint/libs/net"
@@ -32,38 +31,34 @@ func init() {
func TestKVStore(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewTestingLogger(t)

    logger.Info("### Testing KVStore")
    testStream(ctx, t, logger, kvstore.NewApplication())
    fmt.Println("### Testing KVStore")
    testStream(ctx, t, kvstore.NewApplication())
}

func TestBaseApp(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewTestingLogger(t)

    logger.Info("### Testing BaseApp")
    testStream(ctx, t, logger, types.NewBaseApplication())
    fmt.Println("### Testing BaseApp")
    testStream(ctx, t, types.NewBaseApplication())
}

func TestGRPC(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

    logger := log.NewTestingLogger(t)

    logger.Info("### Testing GRPC")
    testGRPCSync(ctx, t, logger, types.NewGRPCApplication(types.NewBaseApplication()))
    fmt.Println("### Testing GRPC")
    testGRPCSync(ctx, t, types.NewGRPCApplication(types.NewBaseApplication()))
}

func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.Application) {
func testStream(ctx context.Context, t *testing.T, app types.Application) {
    t.Helper()

    const numDeliverTxs = 20000
    socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
    defer os.Remove(socketFile)
    socket := fmt.Sprintf("unix://%v", socketFile)
    logger := log.TestingLogger()
    // Start the listener
    server := abciserver.NewSocketServer(logger.With("module", "abci-server"), socket, app)
    t.Cleanup(server.Wait)
@@ -71,7 +66,7 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.
    require.NoError(t, err)

    // Connect to the socket
    client := abciclient.NewSocketClient(logger.With("module", "abci-client"), socket, false)
    client := abciclient.NewSocketClient(log.TestingLogger().With("module", "abci-client"), socket, false)
    t.Cleanup(client.Wait)

    err = client.Start(ctx)
@@ -112,7 +107,7 @@ func testStream(ctx context.Context, t *testing.T, logger log.Logger, app types.

    // Sometimes send flush messages
    if counter%128 == 0 {
        err = client.Flush(ctx)
        err = client.FlushSync(context.Background())
        require.NoError(t, err)
    }
}
@@ -131,25 +126,26 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
    return tmnet.Connect(addr)
}

func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app types.ABCIApplicationServer) {
    t.Helper()
func testGRPCSync(ctx context.Context, t *testing.T, app types.ABCIApplicationServer) {
    numDeliverTxs := 2000
    socketFile := fmt.Sprintf("/tmp/test-%08x.sock", rand.Int31n(1<<30))
    defer os.Remove(socketFile)
    socket := fmt.Sprintf("unix://%v", socketFile)

    logger := log.TestingLogger()
    // Start the listener
    server := abciserver.NewGRPCServer(logger.With("module", "abci-server"), socket, app)

    require.NoError(t, server.Start(ctx))
    if err := server.Start(ctx); err != nil {
        t.Fatalf("Error starting GRPC server: %v", err.Error())
    }

    t.Cleanup(func() { server.Wait() })

    // Connect to the socket
    conn, err := grpc.Dial(socket,
        grpc.WithTransportCredentials(insecure.NewCredentials()),
        grpc.WithContextDialer(dialerFunc),
    )
    require.NoError(t, err, "Error dialing GRPC server")
    conn, err := grpc.Dial(socket, grpc.WithInsecure(), grpc.WithContextDialer(dialerFunc))
    if err != nil {
        t.Fatalf("Error dialing GRPC server: %v", err.Error())
    }

    t.Cleanup(func() {
        if err := conn.Close(); err != nil {
@@ -162,9 +158,10 @@ func testGRPCSync(ctx context.Context, t *testing.T, logger log.Logger, app type
    // Write requests
    for counter := 0; counter < numDeliverTxs; counter++ {
        // Send request
        response, err := client.DeliverTx(ctx, &types.RequestDeliverTx{Tx: []byte("test")})
        require.NoError(t, err, "Error in GRPC DeliverTx")

        response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
        if err != nil {
            t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
        }
        counter++
        if response.Code != code.CodeTypeOK {
            t.Error("DeliverTx failed with ret_code", response.Code)
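Note: the two dialing styles in the hunk above are interchangeable; grpc.WithInsecure() is the older spelling that recent grpc-go versions deprecate in favor of an explicit insecure transport credential. Sketch:

package main

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/credentials/insecure"
)

// dial uses the credentials form; the deprecated equivalent would be
// grpc.Dial(addr, grpc.WithInsecure()).
func dial(addr string) (*grpc.ClientConn, error) {
    return grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
}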
@@ -76,9 +76,7 @@ func TestPersistentKVStoreKV(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)
    kvstore := NewPersistentKVStoreApplication(dir)
    key := testKey
    value := key
    tx := []byte(key)
@@ -94,9 +92,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)
    kvstore := NewPersistentKVStoreApplication(dir)
    InitKVStore(kvstore)
    height := int64(0)

@@ -128,9 +124,7 @@ func TestValUpdates(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    logger := log.NewTestingLogger(t)

    kvstore := NewPersistentKVStoreApplication(logger, dir)
    kvstore := NewPersistentKVStoreApplication(dir)

    // init with some validators
    total := 10
@@ -295,7 +289,7 @@ func makeGRPCClientServer(
func TestClientServer(t *testing.T) {
    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()
    logger := log.NewTestingLogger(t)
    logger := log.TestingLogger()

    // set up socket app
    kvstore := NewApplication()
@@ -330,39 +324,39 @@ func runClientTests(ctx context.Context, t *testing.T, client abciclient.Client)
}

func testClient(ctx context.Context, t *testing.T, app abciclient.Client, tx []byte, key, value string) {
    ar, err := app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
    ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
    require.NoError(t, err)
    require.False(t, ar.IsErr(), ar)
    // repeating tx doesn't raise error
    ar, err = app.DeliverTx(ctx, types.RequestDeliverTx{Tx: tx})
    ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
    require.NoError(t, err)
    require.False(t, ar.IsErr(), ar)
    // commit
    _, err = app.Commit(ctx)
    _, err = app.CommitSync(ctx)
    require.NoError(t, err)

    info, err := app.Info(ctx, types.RequestInfo{})
    info, err := app.InfoSync(ctx, types.RequestInfo{})
    require.NoError(t, err)
    require.NotZero(t, info.LastBlockHeight)

    // make sure query is fine
    resQuery, err := app.Query(ctx, types.RequestQuery{
    resQuery, err := app.QuerySync(ctx, types.RequestQuery{
        Path: "/store",
        Data: []byte(key),
    })
    require.NoError(t, err)
    require.Nil(t, err)
    require.Equal(t, code.CodeTypeOK, resQuery.Code)
    require.Equal(t, key, string(resQuery.Key))
    require.Equal(t, value, string(resQuery.Value))
    require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

    // make sure proof is fine
    resQuery, err = app.Query(ctx, types.RequestQuery{
    resQuery, err = app.QuerySync(ctx, types.RequestQuery{
        Path:  "/store",
        Data:  []byte(key),
        Prove: true,
    })
    require.NoError(t, err)
    require.Nil(t, err)
    require.Equal(t, code.CodeTypeOK, resQuery.Code)
    require.Equal(t, key, string(resQuery.Key))
    require.Equal(t, value, string(resQuery.Value))

@@ -35,7 +35,7 @@ type PersistentKVStoreApplication struct {
    logger log.Logger
}

func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *PersistentKVStoreApplication {
func NewPersistentKVStoreApplication(dbDir string) *PersistentKVStoreApplication {
    name := "kvstore"
    db, err := dbm.NewGoLevelDB(name, dbDir)
    if err != nil {
@@ -47,7 +47,7 @@ func NewPersistentKVStoreApplication(logger log.Logger, dbDir string) *Persisten
    return &PersistentKVStoreApplication{
        app:                &Application{state: state},
        valAddrToPubKeyMap: make(map[string]cryptoproto.PublicKey),
        logger:             logger,
        logger:             log.NewNopLogger(),
    }
}

@@ -55,6 +55,10 @@ func (app *PersistentKVStoreApplication) Close() error {
    return app.app.state.db.Close()
}

func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
    app.logger = l
}

func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
    res := app.app.Info(req)
    res.LastBlockHeight = app.app.state.Height
@@ -109,7 +113,7 @@ func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) t
    for _, v := range req.Validators {
        r := app.updateValidator(v)
        if r.IsErr() {
            app.logger.Error("error updating validators", "r", r)
            app.logger.Error("Error updating validators", "r", r)
        }
    }
    return types.ResponseInitChain{}
@@ -267,7 +271,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
    if err := types.WriteMessage(&v, value); err != nil {
        return types.ResponseDeliverTx{
            Code: code.CodeTypeEncodingError,
            Log:  fmt.Sprintf("error encoding validator: %v", err)}
            Log:  fmt.Sprintf("Error encoding validator: %v", err)}
    }
    if err = app.app.state.db.Set(key, value.Bytes()); err != nil {
        panic(err)
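Note: with the constructor signature on the right-hand side of this diff, the persistent kvstore app is built without a logger and receives one afterwards via SetLogger. A minimal sketch of that lifecycle (helper name is hypothetical):

package main

import (
    "github.com/tendermint/tendermint/abci/example/kvstore"
    "github.com/tendermint/tendermint/libs/log"
)

// newLoggedKVStore builds the persistent app, wires a logger, and leaves
// Close (which closes the underlying goleveldb handle) to the caller.
func newLoggedKVStore(dir string, logger log.Logger) *kvstore.PersistentKVStoreApplication {
    app := kvstore.NewPersistentKVStoreApplication(dir)
    app.SetLogger(logger.With("module", "kvstore"))
    return app
}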
@@ -58,7 +58,7 @@ func (s *GRPCServer) OnStart(ctx context.Context) error {
    }()

        if err := s.server.Serve(s.listener); err != nil {
            s.logger.Error("error serving gRPC server", "err", err)
            s.logger.Error("Error serving gRPC server", "err", err)
        }
    }()
    return nil

@@ -61,7 +61,7 @@ func (s *SocketServer) OnStart(ctx context.Context) error {

func (s *SocketServer) OnStop() {
    if err := s.listener.Close(); err != nil {
        s.logger.Error("error closing listener", "err", err)
        s.logger.Error("Error closing listener", "err", err)
    }

    s.connsMtx.Lock()
@@ -70,7 +70,7 @@ func (s *SocketServer) OnStop() {
    for id, conn := range s.conns {
        delete(s.conns, id)
        if err := conn.Close(); err != nil {
            s.logger.Error("error closing connection", "id", id, "conn", conn, "err", err)
            s.logger.Error("Error closing connection", "id", id, "conn", conn, "err", err)
        }
    }
}
@@ -139,7 +139,7 @@ func (s *SocketServer) waitForClose(ctx context.Context, closeConn chan error, c
    defer func() {
        // Close the connection
        if err := s.rmConn(connID); err != nil {
            s.logger.Error("error closing connection", "err", err)
            s.logger.Error("Error closing connection", "err", err)
        }
    }()

@@ -259,26 +259,14 @@ func (s *SocketServer) handleResponses(
    responses <-chan *types.Response,
) {
    bw := bufio.NewWriter(conn)
    for {
        select {
        case <-ctx.Done():
            for res := range responses {
                if err := types.WriteMessage(res, bw); err != nil {
                    closeConn <- fmt.Errorf("error writing message: %w", err)
                    return
                }
                if err := bw.Flush(); err != nil {
                    closeConn <- fmt.Errorf("error flushing write buffer: %w", err)
                    return
        case res := <-responses:
            if err := types.WriteMessage(res, bw); err != nil {
                select {
                case <-ctx.Done():
                case closeConn <- fmt.Errorf("error writing message: %w", err):
                }
                return
            }
            if err := bw.Flush(); err != nil {
                select {
                case <-ctx.Done():
                case closeConn <- fmt.Errorf("error flushing write buffer: %w", err):
                }

                return
            }
        }
    }
}
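Note: the context-aware variant of handleResponses above guards every send on closeConn with a ctx.Done() case, so the writer can never block on a channel nobody is draining. The pattern in isolation (names are illustrative):

package main

import "context"

// sendErr reports err on errCh but gives up once ctx is cancelled,
// avoiding a goroutine leak when no receiver remains.
func sendErr(ctx context.Context, errCh chan<- error, err error) {
    select {
    case <-ctx.Done():
    case errCh <- err:
    }
}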
@@ -4,7 +4,6 @@ import (
    "context"
    "testing"

    "github.com/fortytw2/leaktest"
    "github.com/stretchr/testify/assert"

    abciclientent "github.com/tendermint/tendermint/abci/client"
@@ -14,8 +13,6 @@ import (
)

func TestClientServerNoAddrPrefix(t *testing.T) {
    t.Cleanup(leaktest.Check(t))

    ctx, cancel := context.WithCancel(context.Background())
    defer cancel()

@@ -24,17 +21,15 @@ func TestClientServerNoAddrPrefix(t *testing.T) {
        transport = "socket"
    )
    app := kvstore.NewApplication()
    logger := log.NewTestingLogger(t)
    logger := log.TestingLogger()

    server, err := abciserver.NewServer(logger, addr, transport, app)
    assert.NoError(t, err, "expected no error on NewServer")
    err = server.Start(ctx)
    assert.NoError(t, err, "expected no error on server.Start")
    t.Cleanup(server.Wait)

    client, err := abciclientent.NewClient(logger, addr, transport, true)
    assert.NoError(t, err, "expected no error on NewClient")
    err = client.Start(ctx)
    assert.NoError(t, err, "expected no error on client.Start")
    t.Cleanup(client.Wait)
}

@@ -21,7 +21,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
        power := mrand.Int()
        vals[i] = types.UpdateValidator(pubkey, int64(power), "")
    }
    _, err := client.InitChain(ctx, types.RequestInitChain{
    _, err := client.InitChainSync(ctx, types.RequestInitChain{
        Validators: vals,
    })
    if err != nil {
@@ -33,7 +33,7 @@ func InitChain(ctx context.Context, client abciclient.Client) error {
}

func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error {
    res, err := client.Commit(ctx)
    res, err := client.CommitSync(ctx)
    data := res.Data
    if err != nil {
        fmt.Println("Failed test: Commit")
@@ -50,7 +50,7 @@ func Commit(ctx context.Context, client abciclient.Client, hashExp []byte) error
}

func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
    res, _ := client.DeliverTx(ctx, types.RequestDeliverTx{Tx: txBytes})
    res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
    code, data, log := res.Code, res.Data, res.Log
    if code != codeExp {
        fmt.Println("Failed test: DeliverTx")
@@ -69,7 +69,7 @@ func DeliverTx(ctx context.Context, client abciclient.Client, txBytes []byte, co
}

func CheckTx(ctx context.Context, client abciclient.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
    res, _ := client.CheckTx(ctx, types.RequestCheckTx{Tx: txBytes})
    res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
    code, data, log := res.Code, res.Data, res.Log
    if code != codeExp {
        fmt.Println("Failed test: CheckTx")

@@ -14,7 +14,7 @@ import (

func TestMarshalJSON(t *testing.T) {
    b, err := json.Marshal(&ResponseDeliverTx{})
    assert.NoError(t, err)
    assert.Nil(t, err)
    // include empty fields.
    assert.True(t, strings.Contains(string(b), "code"))
    r1 := ResponseCheckTx{
@@ -31,11 +31,11 @@ func TestMarshalJSON(t *testing.T) {
        },
    }
    b, err = json.Marshal(&r1)
    assert.NoError(t, err)
    assert.Nil(t, err)

    var r2 ResponseCheckTx
    err = json.Unmarshal(b, &r2)
    assert.NoError(t, err)
    assert.Nil(t, err)
    assert.Equal(t, r1, r2)
}

@@ -49,11 +49,11 @@ func TestWriteReadMessageSimple(t *testing.T) {
    for _, c := range cases {
        buf := new(bytes.Buffer)
        err := WriteMessage(c, buf)
        assert.NoError(t, err)
        assert.Nil(t, err)

        msg := new(RequestEcho)
        err = ReadMessage(buf, msg)
        assert.NoError(t, err)
        assert.Nil(t, err)

        assert.True(t, proto.Equal(c, msg))
    }
@@ -71,11 +71,11 @@ func TestWriteReadMessage(t *testing.T) {
    for _, c := range cases {
        buf := new(bytes.Buffer)
        err := WriteMessage(c, buf)
        assert.NoError(t, err)
        assert.Nil(t, err)

        msg := new(tmproto.Header)
        err = ReadMessage(buf, msg)
        assert.NoError(t, err)
        assert.Nil(t, err)

        assert.True(t, proto.Equal(c, msg))
    }
@@ -103,11 +103,11 @@ func TestWriteReadMessage2(t *testing.T) {
    for _, c := range cases {
        buf := new(bytes.Buffer)
        err := WriteMessage(c, buf)
        assert.NoError(t, err)
        assert.Nil(t, err)

        msg := new(ResponseCheckTx)
        err = ReadMessage(buf, msg)
        assert.NoError(t, err)
        assert.Nil(t, err)

        assert.True(t, proto.Equal(c, msg))
    }

@@ -9,8 +9,6 @@ import (
    "net"
    "net/http"
    "os"
    "os/signal"
    "syscall"
    "time"

    grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
@@ -21,6 +19,7 @@ import (

    "github.com/tendermint/tendermint/libs/log"
    tmnet "github.com/tendermint/tendermint/libs/net"
    tmos "github.com/tendermint/tendermint/libs/os"
    "github.com/tendermint/tendermint/privval"
    grpcprivval "github.com/tendermint/tendermint/privval/grpc"
    privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval"
@@ -46,7 +45,7 @@ func main() {
        rootCA         = flag.String("rootcafile", "", "absolute path to root CA")
        prometheusAddr = flag.String("prometheus-addr", "", "address for prometheus endpoint (host:port)")

        logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo).
        logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false).
            With("module", "priv_val")
    )
    flag.Parse()
@@ -134,10 +133,9 @@ func main() {
        os.Exit(1)
    }

    opctx, opcancel := signal.NotifyContext(ctx, os.Interrupt, syscall.SIGTERM)
    defer opcancel()
    go func() {
        <-opctx.Done()
    // Stop upon receiving SIGTERM or CTRL-C.
    tmos.TrapSignal(ctx, logger, func() {
        logger.Debug("SignerServer: calling Close")
        if *prometheusAddr != "" {
            ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second)
            defer cancel()
@@ -147,7 +145,7 @@ func main() {
            }
        }
        s.GracefulStop()
    }()
    })

    // Run forever.
    select {}
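Note: one side of the hunk above shuts down via signal.NotifyContext rather than tmos.TrapSignal. The generic shape of that pattern, as a sketch (the cleanup hook is illustrative):

package main

import (
    "context"
    "os"
    "os/signal"
    "syscall"
)

// runUntilSignal blocks until SIGINT/SIGTERM (or parent cancellation),
// then runs the supplied cleanup hook.
func runUntilSignal(parent context.Context, cleanup func()) {
    ctx, cancel := signal.NotifyContext(parent, os.Interrupt, syscall.SIGTERM)
    defer cancel()
    <-ctx.Done()
    cleanup()
}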
@@ -15,7 +15,7 @@ var (
|
||||
flagProfAddr = "pprof-laddr"
|
||||
flagFrequency = "frequency"
|
||||
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
)
|
||||
|
||||
// DebugCmd defines the root command containing subcommands that assist in
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
package debug
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
@@ -43,7 +42,7 @@ func init() {
|
||||
)
|
||||
}
|
||||
|
||||
func dumpCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
func dumpCmdHandler(_ *cobra.Command, args []string) error {
|
||||
outDir := args[0]
|
||||
if outDir == "" {
|
||||
return errors.New("invalid output directory")
|
||||
@@ -64,24 +63,22 @@ func dumpCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
return fmt.Errorf("failed to create new http client: %w", err)
|
||||
}
|
||||
|
||||
ctx := cmd.Context()
|
||||
|
||||
home := viper.GetString(cli.HomeFlag)
|
||||
conf := config.DefaultConfig()
|
||||
conf = conf.SetRoot(home)
|
||||
config.EnsureRoot(conf.RootDir)
|
||||
|
||||
dumpDebugData(ctx, outDir, conf, rpc)
|
||||
dumpDebugData(outDir, conf, rpc)
|
||||
|
||||
ticker := time.NewTicker(time.Duration(frequency) * time.Second)
|
||||
for range ticker.C {
|
||||
dumpDebugData(ctx, outDir, conf, rpc)
|
||||
dumpDebugData(outDir, conf, rpc)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
|
||||
func dumpDebugData(outDir string, conf *config.Config, rpc *rpchttp.HTTP) {
|
||||
start := time.Now().UTC()
|
||||
|
||||
tmpDir, err := os.MkdirTemp(outDir, "tendermint_debug_tmp")
|
||||
@@ -92,19 +89,19 @@ func dumpDebugData(ctx context.Context, outDir string, conf *config.Config, rpc
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
logger.Info("getting node status...")
|
||||
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
|
||||
if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil {
|
||||
logger.Error("failed to dump node status", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("getting node network info...")
|
||||
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
|
||||
if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil {
|
||||
logger.Error("failed to dump node network info", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
logger.Info("getting node consensus state...")
|
||||
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
logger.Error("failed to dump node consensus state", "error", err)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -33,7 +33,6 @@ $ tendermint debug kill 34255 /path/to/tm-debug.zip`,
|
||||
}
|
||||
|
||||
func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
ctx := cmd.Context()
|
||||
pid, err := strconv.ParseInt(args[0], 10, 64)
|
||||
if err != nil {
|
||||
return err
|
||||
@@ -63,17 +62,17 @@ func killCmdHandler(cmd *cobra.Command, args []string) error {
|
||||
defer os.RemoveAll(tmpDir)
|
||||
|
||||
logger.Info("getting node status...")
|
||||
if err := dumpStatus(ctx, rpc, tmpDir, "status.json"); err != nil {
|
||||
if err := dumpStatus(rpc, tmpDir, "status.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node network info...")
|
||||
if err := dumpNetInfo(ctx, rpc, tmpDir, "net_info.json"); err != nil {
|
||||
if err := dumpNetInfo(rpc, tmpDir, "net_info.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
logger.Info("getting node consensus state...")
|
||||
if err := dumpConsensusState(ctx, rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
if err := dumpConsensusState(rpc, tmpDir, "consensus_state.json"); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
||||
@@ -15,8 +15,8 @@ import (
|
||||
|
||||
// dumpStatus gets node status state dump from the Tendermint RPC and writes it
|
||||
// to file. It returns an error upon failure.
|
||||
func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
status, err := rpc.Status(ctx)
|
||||
func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
status, err := rpc.Status(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node status: %w", err)
|
||||
}
|
||||
@@ -26,8 +26,8 @@ func dumpStatus(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) er
|
||||
|
||||
// dumpNetInfo gets network information state dump from the Tendermint RPC and
|
||||
// writes it to file. It returns an error upon failure.
|
||||
func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
netInfo, err := rpc.NetInfo(ctx)
|
||||
func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
netInfo, err := rpc.NetInfo(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node network information: %w", err)
|
||||
}
|
||||
@@ -37,8 +37,8 @@ func dumpNetInfo(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) e
|
||||
|
||||
// dumpConsensusState gets consensus state dump from the Tendermint RPC and
|
||||
// writes it to file. It returns an error upon failure.
|
||||
func dumpConsensusState(ctx context.Context, rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
consDump, err := rpc.DumpConsensusState(ctx)
|
||||
func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error {
|
||||
consDump, err := rpc.DumpConsensusState(context.Background())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get node consensus dump: %w", err)
|
||||
}
|
||||
|
||||
@@ -39,10 +39,10 @@ func initFiles(cmd *cobra.Command, args []string) error {
|
||||
return errors.New("must specify a node type: tendermint init [validator|full|seed]")
|
||||
}
|
||||
config.Mode = args[0]
|
||||
return initFilesWithConfig(cmd.Context(), config)
|
||||
return initFilesWithConfig(config)
|
||||
}
|
||||
|
||||
func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
|
||||
func initFilesWithConfig(config *cfg.Config) error {
|
||||
var (
|
||||
pv *privval.FilePV
|
||||
err error
|
||||
@@ -65,9 +65,7 @@ func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pv.Save(); err != nil {
|
||||
return err
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
@@ -100,7 +98,7 @@ func initFilesWithConfig(ctx context.Context, config *cfg.Config) error {
|
||||
}
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithTimeout(ctx, ctxTimeout)
|
||||
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
|
||||
defer cancel()
|
||||
|
||||
// if this is a validator we add it to genesis
|
||||
|
||||
@@ -17,6 +17,7 @@ import (
|
||||
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmmath "github.com/tendermint/tendermint/libs/math"
|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/light"
|
||||
lproxy "github.com/tendermint/tendermint/light/proxy"
|
||||
lrpc "github.com/tendermint/tendermint/light/rpc"
|
||||
@@ -102,7 +103,7 @@ func init() {
|
||||
}
|
||||
|
||||
func runProxy(cmd *cobra.Command, args []string) error {
|
||||
logger, err := log.NewDefaultLogger(logFormat, logLevel)
|
||||
logger, err := log.NewDefaultLogger(logFormat, logLevel, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -187,14 +188,15 @@ func runProxy(cmd *cobra.Command, args []string) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// Stop upon receiving SIGTERM or CTRL-C.
|
||||
tmos.TrapSignal(cmd.Context(), logger, func() {
|
||||
p.Listener.Close()
|
||||
})
|
||||
|
||||
// this might be redundant to the above, eventually.
|
||||
ctx, cancel := signal.NotifyContext(cmd.Context(), syscall.SIGTERM)
|
||||
defer cancel()
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
p.Listener.Close()
|
||||
}()
|
||||
|
||||
logger.Info("Starting proxy...", "laddr", listenAddr)
|
||||
if err := p.ListenAndServe(ctx); err != http.ErrServerClosed {
|
||||
// Error starting or closing listener:
|
||||
|
||||
@@ -25,13 +25,13 @@ const (
|
||||
base int64 = 2
|
||||
)
|
||||
|
||||
func setupReIndexEventCmd(ctx context.Context) *cobra.Command {
|
||||
func setupReIndexEventCmd() *cobra.Command {
|
||||
reIndexEventCmd := &cobra.Command{
|
||||
Use: ReIndexEventCmd.Use,
|
||||
Run: func(cmd *cobra.Command, args []string) {},
|
||||
}
|
||||
|
||||
_ = reIndexEventCmd.ExecuteContext(ctx)
|
||||
_ = reIndexEventCmd.ExecuteContext(context.Background())
|
||||
|
||||
return reIndexEventCmd
|
||||
}
|
||||
@@ -177,14 +177,11 @@ func TestReIndexEvent(t *testing.T) {
|
||||
{height, height, false},
|
||||
}
|
||||
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
|
||||
for _, tc := range testCases {
|
||||
startHeight = tc.startHeight
|
||||
endHeight = tc.endHeight
|
||||
|
||||
err := eventReIndex(setupReIndexEventCmd(ctx), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
|
||||
err := eventReIndex(setupReIndexEventCmd(), []indexer.EventSink{mockEventSink}, mockBlockStore, mockStateStore)
|
||||
if tc.reIndexErr {
|
||||
require.Error(t, err)
|
||||
} else {
|
||||
|
||||
@@ -53,7 +53,7 @@ func ResetAll(dbDir, privValKeyFile, privValStateFile string, logger log.Logger)
|
||||
if err := os.RemoveAll(dbDir); err == nil {
|
||||
logger.Info("Removed all blockchain history", "dir", dbDir)
|
||||
} else {
|
||||
logger.Error("error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err)
|
||||
}
|
||||
// recreate the dbDir since the privVal state needs to live there
|
||||
if err := tmos.EnsureDir(dbDir, 0700); err != nil {
|
||||
@@ -68,9 +68,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pv.Reset(); err != nil {
|
||||
return err
|
||||
}
|
||||
pv.Reset()
|
||||
logger.Info("Reset private validator file to genesis state", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
} else {
|
||||
@@ -78,9 +76,7 @@ func resetFilePV(privValKeyFile, privValStateFile string, logger log.Logger) err
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pv.Save(); err != nil {
|
||||
return err
|
||||
}
|
||||
pv.Save()
|
||||
logger.Info("Generated private validator file", "keyFile", privValKeyFile,
|
||||
"stateFile", privValStateFile)
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/tendermint/tendermint/cmd/tendermint/commands"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
"github.com/tendermint/tendermint/rpc/client/local"
|
||||
rpctest "github.com/tendermint/tendermint/rpc/test"
|
||||
e2e "github.com/tendermint/tendermint/test/e2e/app"
|
||||
@@ -50,9 +49,7 @@ func TestRollbackIntegration(t *testing.T) {
|
||||
node2, _, err2 := rpctest.StartTendermint(ctx, cfg, app, rpctest.SuppressStdout)
|
||||
require.NoError(t, err2)
|
||||
|
||||
logger := log.NewTestingLogger(t)
|
||||
|
||||
client, err := local.New(logger, node2.(local.NodeService))
|
||||
client, err := local.New(node2.(local.NodeService))
|
||||
require.NoError(t, err)
|
||||
|
||||
ticker := time.NewTicker(200 * time.Millisecond)
|
||||
|
||||
@@ -13,7 +13,7 @@ import (
|
||||
|
||||
var (
|
||||
config = cfg.DefaultConfig()
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo)
|
||||
logger = log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)
|
||||
ctxTimeout = 4 * time.Second
|
||||
)
|
||||
|
||||
@@ -36,7 +36,7 @@ func ParseConfig() (*cfg.Config, error) {
|
||||
conf.SetRoot(conf.RootDir)
|
||||
cfg.EnsureRoot(conf.RootDir)
|
||||
if err := conf.ValidateBasic(); err != nil {
|
||||
return nil, fmt.Errorf("error in config file: %w", err)
|
||||
return nil, fmt.Errorf("error in config file: %v", err)
|
||||
}
|
||||
return conf, nil
|
||||
}
|
||||
@@ -55,7 +55,7 @@ var RootCmd = &cobra.Command{
|
||||
return err
|
||||
}
|
||||
|
||||
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel)
|
||||
logger, err = log.NewDefaultLogger(config.LogFormat, config.LogLevel, false)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -18,12 +18,17 @@ import (
|
||||
)
|
||||
|
||||
// clearConfig clears env vars, the given root dir, and resets viper.
|
||||
func clearConfig(t *testing.T, dir string) {
|
||||
t.Helper()
|
||||
require.NoError(t, os.Unsetenv("TMHOME"))
|
||||
require.NoError(t, os.Unsetenv("TM_HOME"))
|
||||
require.NoError(t, os.RemoveAll(dir))
|
||||
func clearConfig(dir string) {
|
||||
if err := os.Unsetenv("TMHOME"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := os.Unsetenv("TM_HOME"); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
if err := os.RemoveAll(dir); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
viper.Reset()
|
||||
config = cfg.DefaultConfig()
|
||||
}
|
||||
@@ -41,9 +46,8 @@ func testRootCmd() *cobra.Command {
|
||||
return rootCmd
|
||||
}
|
||||
|
||||
func testSetup(t *testing.T, rootDir string, args []string, env map[string]string) error {
|
||||
t.Helper()
|
||||
clearConfig(t, rootDir)
|
||||
func testSetup(rootDir string, args []string, env map[string]string) error {
|
||||
clearConfig(rootDir)
|
||||
|
||||
rootCmd := testRootCmd()
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", rootDir)
|
||||
@@ -69,8 +73,8 @@ func TestRootHome(t *testing.T) {
|
||||
for i, tc := range cases {
|
||||
idxString := strconv.Itoa(i)
|
||||
|
||||
err := testSetup(t, defaultRoot, tc.args, tc.env)
|
||||
require.NoError(t, err, idxString)
|
||||
err := testSetup(defaultRoot, tc.args, tc.env)
|
||||
require.Nil(t, err, idxString)
|
||||
|
||||
assert.Equal(t, tc.root, config.RootDir, idxString)
|
||||
assert.Equal(t, tc.root, config.P2P.RootDir, idxString)
|
||||
@@ -101,8 +105,8 @@ func TestRootFlagsEnv(t *testing.T) {
|
||||
for i, tc := range cases {
|
||||
idxString := strconv.Itoa(i)
|
||||
|
||||
err := testSetup(t, defaultRoot, tc.args, tc.env)
|
||||
require.NoError(t, err, idxString)
|
||||
err := testSetup(defaultRoot, tc.args, tc.env)
|
||||
require.Nil(t, err, idxString)
|
||||
|
||||
assert.Equal(t, tc.logLevel, config.LogLevel, idxString)
|
||||
}
|
||||
@@ -130,17 +134,17 @@ func TestRootConfig(t *testing.T) {
|
||||
for i, tc := range cases {
|
||||
defaultRoot := t.TempDir()
|
||||
idxString := strconv.Itoa(i)
|
||||
clearConfig(t, defaultRoot)
|
||||
clearConfig(defaultRoot)
|
||||
|
||||
// XXX: path must match cfg.defaultConfigPath
|
||||
configFilePath := filepath.Join(defaultRoot, "config")
|
||||
err := tmos.EnsureDir(configFilePath, 0700)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, err)
|
||||
|
||||
// write the non-defaults to a different path
|
||||
// TODO: support writing sub configs so we can test that too
|
||||
err = WriteConfigVals(configFilePath, cvals)
|
||||
require.NoError(t, err)
|
||||
require.Nil(t, err)
|
||||
|
||||
rootCmd := testRootCmd()
|
||||
cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot)
|
||||
@@ -148,7 +152,7 @@ func TestRootConfig(t *testing.T) {
|
||||
// run with the args and env
|
||||
tc.args = append([]string{rootCmd.Use}, tc.args...)
|
||||
err = cli.RunWithArgs(cmd, tc.args, tc.env)
|
||||
require.NoError(t, err, idxString)
|
||||
require.Nil(t, err, idxString)
|
||||
|
||||
assert.Equal(t, tc.logLvl, config.LogLevel, idxString)
|
||||
}
|
||||
|
||||
@@ -62,7 +62,7 @@ func AddNodeFlags(cmd *cobra.Command) {
|
||||
"p2p.laddr",
|
||||
config.P2P.ListenAddress,
|
||||
"node listen address. (0.0.0.0:0 means any interface, any port)")
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes") //nolint: staticcheck
|
||||
cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "comma-delimited ID@host:port seed nodes")
|
||||
cmd.Flags().String("p2p.persistent-peers", config.P2P.PersistentPeers, "comma-delimited ID@host:port persistent peers")
|
||||
cmd.Flags().Bool("p2p.upnp", config.P2P.UPNP, "enable/disable UPNP port forwarding")
|
||||
cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "enable/disable Peer-Exchange")
|
||||
|
||||
@@ -25,14 +25,13 @@ func showValidator(cmd *cobra.Command, args []string) error {
var (
pubKey crypto.PubKey
err error
bctx = cmd.Context()
)

//TODO: remove once gRPC is the only supported protocol
protocol, _ := tmnet.ProtocolAndAddress(config.PrivValidator.ListenAddr)
switch protocol {
case "grpc":
pvsc, err := tmgrpc.DialRemoteSigner(
bctx,
config.PrivValidator,
config.ChainID(),
logger,
@@ -42,7 +41,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
return fmt.Errorf("can't connect to remote validator %w", err)
}

ctx, cancel := context.WithTimeout(bctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()

pubKey, err = pvsc.GetPubKey(ctx)
@@ -61,7 +60,7 @@ func showValidator(cmd *cobra.Command, args []string) error {
return err
}

ctx, cancel := context.WithTimeout(bctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()

pubKey, err = pv.GetPubKey(ctx)

@@ -122,7 +122,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}

genVals := make([]types.GenesisValidator, nValidators)
ctx := cmd.Context()

for i := 0; i < nValidators; i++ {
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
nodeDir := filepath.Join(outputDir, nodeDirName)
@@ -139,7 +139,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}

if err := initFilesWithConfig(ctx, config); err != nil {
if err := initFilesWithConfig(config); err != nil {
return err
}

@@ -150,7 +150,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}

ctx, cancel := context.WithTimeout(ctx, ctxTimeout)
ctx, cancel := context.WithTimeout(context.TODO(), ctxTimeout)
defer cancel()

pubKey, err := pv.GetPubKey(ctx)
@@ -181,7 +181,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
return err
}

if err := initFilesWithConfig(ctx, config); err != nil {
if err := initFilesWithConfig(config); err != nil {
return err
}
}

@@ -571,11 +571,9 @@ type P2PConfig struct { //nolint: maligned

// Comma separated list of seed nodes to connect to
// We only use these if we can’t connect to peers in the addrbook
//
// Deprecated: This value is not used by the new PEX reactor. Use
// BootstrapPeers instead.
//
// TODO(#5670): Remove once the p2p refactor is complete.
// NOTE: not used by the new PEX reactor. Please use BootstrapPeers instead.
// TODO: Remove once p2p refactor is complete
// ref: https://github.com/tendermint/tendermint/issues/5670
Seeds string `mapstructure:"seeds"`

// Comma separated list of peers to be added to the peer store
@@ -683,6 +681,7 @@ func TestP2PConfig() *P2PConfig {
cfg.ListenAddress = "tcp://127.0.0.1:36656"
cfg.AllowDuplicateIP = true
cfg.FlushThrottleTimeout = 10 * time.Millisecond

return cfg
}


@@ -10,19 +10,21 @@ import (
)

func TestDefaultConfig(t *testing.T) {
assert := assert.New(t)

// set up some defaults
cfg := DefaultConfig()
assert.NotNil(t, cfg.P2P)
assert.NotNil(t, cfg.Mempool)
assert.NotNil(t, cfg.Consensus)
assert.NotNil(cfg.P2P)
assert.NotNil(cfg.Mempool)
assert.NotNil(cfg.Consensus)

// check the root dir stuff...
cfg.SetRoot("/foo")
cfg.Genesis = "bar"
cfg.DBPath = "/opt/data"

assert.Equal(t, "/foo/bar", cfg.GenesisFile())
assert.Equal(t, "/opt/data", cfg.DBDir())
assert.Equal("/foo/bar", cfg.GenesisFile())
assert.Equal("/opt/data", cfg.DBDir())
}

func TestConfigValidateBasic(t *testing.T) {
@@ -35,18 +37,19 @@ func TestConfigValidateBasic(t *testing.T) {
}

func TestTLSConfiguration(t *testing.T) {
assert := assert.New(t)
cfg := DefaultConfig()
cfg.SetRoot("/home/user")

cfg.RPC.TLSCertFile = "file.crt"
assert.Equal(t, "/home/user/config/file.crt", cfg.RPC.CertFile())
assert.Equal("/home/user/config/file.crt", cfg.RPC.CertFile())
cfg.RPC.TLSKeyFile = "file.key"
assert.Equal(t, "/home/user/config/file.key", cfg.RPC.KeyFile())
assert.Equal("/home/user/config/file.key", cfg.RPC.KeyFile())

cfg.RPC.TLSCertFile = "/abs/path/to/file.crt"
assert.Equal(t, "/abs/path/to/file.crt", cfg.RPC.CertFile())
assert.Equal("/abs/path/to/file.crt", cfg.RPC.CertFile())
cfg.RPC.TLSKeyFile = "/abs/path/to/file.key"
assert.Equal(t, "/abs/path/to/file.key", cfg.RPC.KeyFile())
assert.Equal("/abs/path/to/file.key", cfg.RPC.KeyFile())
}

func TestBaseConfigValidateBasic(t *testing.T) {

@@ -9,7 +9,6 @@ import (
"text/template"

tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
)

// DefaultDirPerm is the default permissions used when creating directories.
@@ -550,7 +549,6 @@ func ResetTestRootWithChainID(testName string, chainID string) (*Config, error)
}

config := TestConfig().SetRoot(rootDir)
config.Instrumentation.Namespace = fmt.Sprintf("%s_%s_%s", testName, chainID, tmrand.Str(16))
return config, nil
}

@@ -571,10 +569,6 @@ var testGenesisFmt = `{
"max_gas": "-1",
"time_iota_ms": "10"
},
"synchrony": {
"message_delay": "500000000",
"precision": "10000000"
},
"evidence": {
"max_age_num_blocks": "100000",
"max_age_duration": "172800000000000",

@@ -14,24 +14,26 @@ func ensureFiles(t *testing.T, rootDir string, files ...string) {
for _, f := range files {
p := rootify(rootDir, f)
_, err := os.Stat(p)
assert.NoError(t, err, p)
assert.Nil(t, err, p)
}
}

func TestEnsureRoot(t *testing.T) {
require := require.New(t)

// setup temp dir for test
tmpDir, err := os.MkdirTemp("", "config-test")
require.NoError(t, err)
require.NoError(err)
defer os.RemoveAll(tmpDir)

// create root dir
EnsureRoot(tmpDir)

require.NoError(t, WriteConfigFile(tmpDir, DefaultConfig()))
require.NoError(WriteConfigFile(tmpDir, DefaultConfig()))

// make sure config is set properly
data, err := os.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
require.NoError(t, err)
require.NoError(err)

checkConfig(t, string(data))

@@ -39,17 +41,19 @@ func TestEnsureRoot(t *testing.T) {
}

func TestEnsureTestRoot(t *testing.T) {
require := require.New(t)

testName := "ensureTestRoot"

// create root dir
cfg, err := ResetTestRoot(testName)
require.NoError(t, err)
require.NoError(err)
defer os.RemoveAll(cfg.RootDir)
rootDir := cfg.RootDir

// make sure config is set properly
data, err := os.ReadFile(filepath.Join(rootDir, defaultConfigFilePath))
require.NoError(t, err)
require.Nil(err)

checkConfig(t, string(data))


@@ -2,7 +2,6 @@ package crypto

import (
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
"github.com/tendermint/tendermint/libs/bytes"
)

@@ -26,9 +25,6 @@ type PubKey interface {
VerifySignature(msg []byte, sig []byte) bool
Equals(PubKey) bool
Type() string

// Implementations must support tagged encoding in JSON.
jsontypes.Tagged
}

type PrivKey interface {
@@ -37,9 +33,6 @@ type PrivKey interface {
PubKey() PubKey
Equals(PrivKey) bool
Type() string

// Implementations must support tagged encoding in JSON.
jsontypes.Tagged
}

type Symmetric interface {

@@ -12,7 +12,6 @@ import (

"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"
)

@@ -59,17 +58,11 @@ const (
func init() {
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)

jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
}

// PrivKey implements crypto.PrivKey.
type PrivKey []byte

// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }

// Bytes returns the privkey byte format.
func (privKey PrivKey) Bytes() []byte {
return []byte(privKey)
@@ -158,9 +151,6 @@ var _ crypto.PubKey = PubKey{}
// PubKeyEd25519 implements crypto.PubKey for the Ed25519 signature scheme.
type PubKey []byte

// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }

// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

@@ -17,7 +17,7 @@ func TestSignAndValidateEd25519(t *testing.T) {

msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)

// Test the signature
assert.True(t, pubKey.VerifySignature(msg, sig))

@@ -28,13 +28,13 @@ func TestKeyPath(t *testing.T) {
case KeyEncodingHex:
rand.Read(keys[i])
default:
require.Fail(t, "Unexpected encoding")
panic("Unexpected encoding")
}
path = path.AppendKey(keys[i], enc)
}

res, err := KeyPathToKeys(path.String())
require.NoError(t, err)
require.Nil(t, err)
require.Equal(t, len(keys), len(res))

for i, key := range keys {

@@ -79,58 +79,58 @@ func TestProofOperators(t *testing.T) {
// Good
popz := ProofOperators([]ProofOperator{op1, op2, op3, op4})
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.NoError(t, err)
assert.Nil(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1"))
assert.NoError(t, err)
assert.Nil(t, err)

// BAD INPUT
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1_WRONG")})
assert.Error(t, err)
assert.NotNil(t, err)
err = popz.VerifyValue(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", bz("INPUT1_WRONG"))
assert.Error(t, err)
assert.NotNil(t, err)

// BAD KEY 1
err = popz.Verify(bz("OUTPUT4"), "/KEY3/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD KEY 2
err = popz.Verify(bz("OUTPUT4"), "KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD KEY 3
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1/", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD KEY 4
err = popz.Verify(bz("OUTPUT4"), "//KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD KEY 5
err = popz.Verify(bz("OUTPUT4"), "/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD OUTPUT 1
err = popz.Verify(bz("OUTPUT4_WRONG"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD OUTPUT 2
err = popz.Verify(bz(""), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD POPZ 1
popz = []ProofOperator{op1, op2, op4}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD POPZ 2
popz = []ProofOperator{op4, op3, op2, op1}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)

// BAD POPZ 3
popz = []ProofOperator{}
err = popz.Verify(bz("OUTPUT4"), "/KEY4/KEY2/KEY1", [][]byte{bz("INPUT1")})
assert.Error(t, err)
assert.NotNil(t, err)
}

func bz(s string) []byte {

@@ -10,7 +10,6 @@ import (

secp256k1 "github.com/btcsuite/btcd/btcec"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"

// necessary for Bitcoin address format
@@ -29,9 +28,6 @@ const (
func init() {
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)

jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
}

var _ crypto.PrivKey = PrivKey{}
@@ -39,9 +35,6 @@ var _ crypto.PrivKey = PrivKey{}
// PrivKey implements PrivKey.
type PrivKey []byte

// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }

// Bytes marshalls the private key using amino encoding.
func (privKey PrivKey) Bytes() []byte {
return []byte(privKey)
@@ -145,9 +138,6 @@ const PubKeySize = 33
// This prefix is followed with the x-coordinate.
type PubKey []byte

// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }

// Address returns a Bitcoin style addresses: RIPEMD160(SHA256(pubkey))
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

@@ -52,7 +52,7 @@ func TestSignAndValidateSecp256k1(t *testing.T) {

msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)

assert.True(t, pubKey.VerifySignature(msg, sig))


@@ -1,9 +1,6 @@
package sr25519

import (
"github.com/tendermint/tendermint/internal/jsontypes"
tmjson "github.com/tendermint/tendermint/libs/json"
)
import tmjson "github.com/tendermint/tendermint/libs/json"

const (
PrivKeyName = "tendermint/PrivKeySr25519"
@@ -13,7 +10,4 @@ const (
func init() {
tmjson.RegisterType(PubKey{}, PubKeyName)
tmjson.RegisterType(PrivKey{}, PrivKeyName)

jsontypes.MustRegister(PubKey{})
jsontypes.MustRegister(PrivKey{})
}

@@ -29,9 +29,6 @@ type PrivKey struct {
kp *sr25519.KeyPair
}

// TypeTag satisfies the jsontypes.Tagged interface.
func (PrivKey) TypeTag() string { return PrivKeyName }

// Bytes returns the byte-encoded PrivKey.
func (privKey PrivKey) Bytes() []byte {
if privKey.kp == nil {

@@ -23,9 +23,6 @@ const (
// PubKey implements crypto.PubKey.
type PubKey []byte

// TypeTag satisfies the jsontypes.Tagged interface.
func (PubKey) TypeTag() string { return PubKeyName }

// Address is the SHA256-20 of the raw pubkey bytes.
func (pubKey PubKey) Address() crypto.Address {
if len(pubKey) != PubKeySize {

@@ -18,7 +18,7 @@ func TestSignAndValidateSr25519(t *testing.T) {

msg := crypto.CRandBytes(128)
sig, err := privKey.Sign(msg)
require.NoError(t, err)
require.Nil(t, err)

// Test the signature
assert.True(t, pubKey.VerifySignature(msg, sig))

@@ -4,61 +4,21 @@ import (
"bytes"
"encoding/hex"
"testing"

"github.com/stretchr/testify/require"
)

func toHex(bits []byte) string {
return hex.EncodeToString(bits)
}

func fromHex(bits string) ([]byte, error) {
func fromHex(bits string) []byte {
b, err := hex.DecodeString(bits)
if err != nil {
return nil, err
panic(err)
}
return b, nil
}

func check(t *testing.T, fn func(string) ([]byte, error), hex string) []byte {
t.Helper()

res, err := fn(hex)
require.NoError(t, err)
return res
return b
}

func TestHChaCha20(t *testing.T) {
var hChaCha20Vectors = []struct {
key, nonce, keystream []byte
}{
{
check(t, fromHex, "0000000000000000000000000000000000000000000000000000000000000000"),
check(t, fromHex, "000000000000000000000000000000000000000000000000"),
check(t, fromHex, "1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"),
},
{
check(t, fromHex, "8000000000000000000000000000000000000000000000000000000000000000"),
check(t, fromHex, "000000000000000000000000000000000000000000000000"),
check(t, fromHex, "7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"),
},
{
check(t, fromHex, "0000000000000000000000000000000000000000000000000000000000000001"),
check(t, fromHex, "000000000000000000000000000000000000000000000002"),
check(t, fromHex, "e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"),
},
{
check(t, fromHex, "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
check(t, fromHex, "000102030405060708090a0b0c0d0e0f1011121314151617"),
check(t, fromHex, "51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"),
},
{
check(t, fromHex, "24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"),
check(t, fromHex, "d9660c5900ae19ddad28d6e06e45fe5e"),
check(t, fromHex, "5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"),
},
}

for i, v := range hChaCha20Vectors {
var key [32]byte
var nonce [16]byte
@@ -72,6 +32,36 @@ func TestHChaCha20(t *testing.T) {
}
}

var hChaCha20Vectors = []struct {
key, nonce, keystream []byte
}{
{
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
fromHex("000000000000000000000000000000000000000000000000"),
fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"),
},
{
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
fromHex("000000000000000000000000000000000000000000000000"),
fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"),
},
{
fromHex("0000000000000000000000000000000000000000000000000000000000000001"),
fromHex("000000000000000000000000000000000000000000000002"),
fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"),
},
{
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"),
fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"),
},
{
fromHex("24f11cce8a1b3d61e441561a696c1c1b7e173d084fd4812425435a8896a013dc"),
fromHex("d9660c5900ae19ddad28d6e06e45fe5e"),
fromHex("5966b3eec3bff1189f831f06afe4d4e3be97fa9235ec8c20d08acfbbb4e851e3"),
},
}

func TestVectors(t *testing.T) {
for i, v := range vectors {
if len(v.plaintext) == 0 {

@@ -18,7 +18,7 @@ func TestSimple(t *testing.T) {
ciphertext := EncryptSymmetric(plaintext, secret)
plaintext2, err := DecryptSymmetric(ciphertext, secret)

require.NoError(t, err, "%+v", err)
require.Nil(t, err, "%+v", err)
assert.Equal(t, plaintext, plaintext2)
}

@@ -35,6 +35,6 @@ func TestSimpleWithKDF(t *testing.T) {
ciphertext := EncryptSymmetric(plaintext, secret)
plaintext2, err := DecryptSymmetric(ciphertext, secret)

require.NoError(t, err, "%+v", err)
require.Nil(t, err, "%+v", err)
assert.Equal(t, plaintext, plaintext2)
}

@@ -83,19 +83,19 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
if err != nil {
return err
}

// Stop upon receiving SIGTERM or CTRL-C.
ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer cancel()

srv.SetLogger(logger.With("module", "abci-server"))
if err := srv.Start(ctx); err != nil {
if err := srv.Start(); err != nil {
return err
}

// Run until shutdown.
<-ctx.Done()
srv.Wait()
// Stop upon receiving SIGTERM or CTRL-C.
tmos.TrapSignal(logger, func() {
// Cleanup
srv.Stop()
})

// Run forever.
select {}
}
```


@@ -101,4 +101,3 @@ Note the context/background should be written in the present tense.
- [ADR-057: RPC](./adr-057-RPC.md)
- [ADR-069: Node Initialization](./adr-069-flexible-node-initialization.md)
- [ADR-071: Proposer-Based Timestamps](adr-071-proposer-based-timestamps.md)
- [ADR-074: Migrate Timeout Parameters to Consensus Parameters](./adr-074-timeout-params.md)

@@ -1,203 +0,0 @@
# ADR 74: Migrate Timeout Parameters to Consensus Parameters

## Changelog

- 03-Jan-2022: Initial draft (@williambanfield)
- 13-Jan-2022: Updated to indicate work on upgrade path needed (@williambanfield)

## Status

Proposed

## Context

### Background

Tendermint's consensus timeout parameters are currently configured locally by each validator
in the validator's [config.toml][config-toml].
This means that the validators on a Tendermint network may have different timeouts
from each other. There is no reason for validators on the same network to configure
different timeout values. Proper functioning of the Tendermint consensus algorithm
relies on these parameters being uniform across validators.

The configurable values are as follows:

* `TimeoutPropose`
    * How long the consensus algorithm waits for a proposal block before issuing a prevote.
    * If no proposal arrives by `TimeoutPropose`, then the consensus algorithm will issue a nil prevote.
* `TimeoutProposeDelta`
    * How much the `TimeoutPropose` grows each round.
* `TimeoutPrevote`
    * How long the consensus algorithm waits after receiving +2/3 prevotes with
      no quorum for a value before issuing a precommit for nil.
      (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 34)
* `TimeoutPrevoteDelta`
    * How much the `TimeoutPrevote` increases with each round.
* `TimeoutPrecommit`
    * How long the consensus algorithm waits after receiving +2/3 precommits that
      do not have a quorum for a value before entering the next round.
      (See the [arXiv paper][arxiv-paper], Algorithm 1, Line 47)
* `TimeoutPrecommitDelta`
    * How much the `TimeoutPrecommit` increases with each round.
* `TimeoutCommit`
    * How long the consensus algorithm waits after committing a block but before starting the new height.
    * This gives a validator a chance to receive slow precommits.
* `SkipTimeoutCommit`
    * Make progress as soon as the node has 100% of the precommits.


### Overview of Change

We will consolidate the timeout parameters and migrate them from the node-local
`config.toml` file into the network-global consensus parameters.

The 8 timeout parameters will be consolidated down to 6. These will be as follows
(a Go sketch of the consolidated set follows the list):

* `TimeoutPropose`
    * Same as current `TimeoutPropose`.
* `TimeoutProposeDelta`
    * Same as current `TimeoutProposeDelta`.
* `TimeoutVote`
    * How long validators wait for votes in both the prevote
      and precommit phase of the consensus algorithm. This parameter subsumes
      the current `TimeoutPrevote` and `TimeoutPrecommit` parameters.
* `TimeoutVoteDelta`
    * How much the `TimeoutVote` will grow each successive round.
      This parameter subsumes the current `TimeoutPrevoteDelta` and `TimeoutPrecommitDelta`
      parameters.
* `TimeoutCommit`
    * Same as current `TimeoutCommit`.
* `EnableTimeoutCommitBypass`
    * Same as current `SkipTimeoutCommit`, renamed for clarity.
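
To make the consolidated shape concrete, here is a minimal Go sketch. The field names
mirror the list above; the default durations are assumptions carried over from the
current local-config defaults, not finalized values.

```go
package adr074sketch

import "time"

// TimeoutParams sketches the consolidated consensus timeout parameters.
// Illustrative only; the real message is defined in params.proto.
type TimeoutParams struct {
	Propose                   time.Duration
	ProposeDelta              time.Duration
	Vote                      time.Duration
	VoteDelta                 time.Duration
	Commit                    time.Duration
	EnableTimeoutCommitBypass bool
}

// DefaultTimeoutParams returns assumed defaults, mirroring the current
// config.toml values (TimeoutPrevote and TimeoutPrecommit both default
// to one second, so a single Vote timeout covers them).
func DefaultTimeoutParams() TimeoutParams {
	return TimeoutParams{
		Propose:      3 * time.Second,
		ProposeDelta: 500 * time.Millisecond,
		Vote:         1 * time.Second,
		VoteDelta:    500 * time.Millisecond,
		Commit:       1 * time.Second,
	}
}
```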

A safe default will be provided by Tendermint for each of these parameters and
networks will be able to update the parameters as they see fit. Local updates
to these parameters will no longer be possible; instead, the application will control
updating the parameters. Applications using the Cosmos SDK will automatically be
able to change the values of these consensus parameters [via a governance proposal][cosmos-sdk-consensus-params].

This change is low-risk. While parameters are locally configurable, many running chains
do not change them from their default values. For example, initializing
a node on Osmosis, Terra, and the Cosmos Hub using their `init` command produces
a `config.toml` with Tendermint's default values for these parameters.

### Why this parameter consolidation?

Reducing the number of parameters is good for UX. Fewer superfluous parameters make
running and operating a Tendermint network less confusing.

The Prevote and Precommit messages are similar in size and require similar amounts
of processing, so there is no strong need for them to be configured separately.

The `TimeoutPropose` parameter governs how long Tendermint will wait for the proposed
block to be gossiped. Blocks are much larger than votes and therefore tend to be
gossiped much more slowly. It therefore makes sense to keep `TimeoutPropose` and
the `TimeoutProposeDelta` as parameters separate from the vote timeouts.

`TimeoutCommit` is used by chains to ensure that the network waits for the votes from
slower validators before proceeding to the next height. Without this timeout, the votes
from slower validators would consistently not be included in blocks and those validators
would not be counted as 'up' from the chain's perspective. Being down damages a validator's
reputation and causes potential stakers to think twice before delegating to that validator.

`TimeoutCommit` also prevents the network from producing the next height as soon as validators
on the fastest hardware with a summed voting power of +2/3 of the network's total have
completed execution of the block. Allowing the network to proceed as soon as the fastest
+2/3 completed execution would have a cumulative effect over heights, eventually
leaving slower validators unable to participate in consensus at all. `TimeoutCommit`
therefore allows networks to have greater variability in hardware. Additional
discussion of this can be found in [tendermint issue 5911][tendermint-issue-5911-comment]
and [spec issue 359][spec-issue-359].

## Alternative Approaches

### Hardcode the parameters

Many Tendermint networks run on similar cloud-hosted infrastructure. Therefore,
they have similar bandwidth and machine resources. The timings for propagating votes
and blocks are likely to be reasonably similar across networks. As a result, the
timeout parameters are good candidates for being hardcoded. Hardcoding the timeouts
in Tendermint would mean entirely removing these parameters from any configuration
that could be altered by either an application or a node operator. Instead,
Tendermint would ship with a set of timeouts and all applications using Tendermint
would use this exact same set of values.

While Tendermint nodes often run with similar bandwidth and on similar cloud-hosted
machines, there are enough points of variability to make configuring
consensus timeouts meaningful. Namely, Tendermint network topologies are likely to be
very different from chain to chain. Additionally, applications may vary greatly in
how long the `Commit` phase may take. Applications that perform more work during `Commit`
require a longer `TimeoutCommit` to allow the application to complete its work
and be prepared for the next height.

## Decision

The decision has been made to implement this work, with the caveat that the
specific mechanism for introducing the new parameters to chains is still being decided.

## Detailed Design

### New Consensus Parameters

A new `TimeoutParams` `message` will be added to the [params.proto file][consensus-params-proto].
This message will have the following form:

```proto
message TimeoutParams {
google.protobuf.Duration propose = 1;
google.protobuf.Duration propose_delta = 2;
google.protobuf.Duration vote = 3;
google.protobuf.Duration vote_delta = 4;
google.protobuf.Duration commit = 5;
bool enable_commit_timeout_bypass = 6;
}
```

This new message will be added as a field into the [`ConsensusParams`
message][consensus-params-proto]. The same default values that are [currently
set for these parameters][current-timeout-defaults] in the local configuration
file will be used as the defaults for these new consensus parameters in the
[consensus parameter defaults][default-consensus-params].

The new consensus parameters will be subject to the same
[validity rules][time-param-validation] as the current configuration values,
namely, each value must be non-negative.
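
For illustration only, that rule could be enforced with a small validator like the
hypothetical sketch below, reusing the `TimeoutParams` sketch from earlier; this is
not code from the repository.

```go
package adr074sketch

import (
	"errors"
	"time"
)

// validateTimeouts is a hypothetical sketch of the validity rule above:
// every timeout duration must be non-negative.
func validateTimeouts(tp TimeoutParams) error {
	for _, d := range []time.Duration{
		tp.Propose, tp.ProposeDelta, tp.Vote, tp.VoteDelta, tp.Commit,
	} {
		if d < 0 {
			return errors.New("consensus timeout parameters must be non-negative")
		}
	}
	return nil
}
```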

### Migration

The new `ConsensusParameters` will be added during an upcoming release. In this
release, the old `config.toml` parameters will cease to control the timeouts and
an error will be logged on nodes that continue to specify these values. The specific
mechanism by which these parameters will be added to a chain is being discussed in
[RFC-009][rfc-009] and will be decided ahead of the next release.

The specific mechanism for adding these parameters depends on work related to
[soft upgrades][soft-upgrades], which is still ongoing.

## Consequences

### Positive

* Timeout parameters will be equal across all of the validators in a Tendermint network.
* Remove superfluous timeout parameters.

### Negative

### Neutral

* Timeout parameters require consensus to change.

## References

[consensus-params-proto]: https://github.com/tendermint/spec/blob/a00de7199f5558cdd6245bbbcd1d8405ccfb8129/proto/tendermint/types/params.proto#L11
[hashed-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L49
[default-consensus-params]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/types/params.go#L79
[current-timeout-defaults]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L955
[config-toml]: https://github.com/tendermint/tendermint/blob/5cc980698a3402afce76b26693ab54b8f67f038b/config/toml.go#L425-L440
[cosmos-sdk-consensus-params]: https://github.com/cosmos/cosmos-sdk/issues/6197
[time-param-validation]: https://github.com/tendermint/tendermint/blob/7cdf560173dee6773b80d1c574a06489d4c394fe/config/config.go#L1038
[tendermint-issue-5911-comment]: https://github.com/tendermint/tendermint/issues/5911#issuecomment-973560381
[spec-issue-359]: https://github.com/tendermint/spec/issues/359
[arxiv-paper]: https://arxiv.org/pdf/1807.04938.pdf
[soft-upgrades]: https://github.com/tendermint/spec/pull/222
[rfc-009]: https://github.com/tendermint/tendermint/pull/7524

@@ -61,7 +61,7 @@ Here are some relevant facts about TCP:



In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary.
In order to have performant TCP connections under the conditions created in Tendermint, we've created the `mconnection`, or the multiplexing connection. It is our own protocol built on top of TCP. It lets us reuse TCP connections to minimize overhead, and it keeps the window size high by sending auxiliary messages when necessary.

The `mconnection` is represented by a struct, which contains a batch of messages, read and write buffers, and a map of channel IDs to reactors. It communicates with TCP via file descriptors, which it can write to. There is one `mconnection` per peer connection.
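
As a loose sketch of that shape (the names below are invented for illustration; the
actual `MConnection` type in the p2p package differs):

```go
package p2psketch

import (
	"bufio"
	"net"
)

// Reactor stands in for the real reactor interface for this sketch.
type Reactor interface {
	Receive(chID byte, msg []byte)
}

// mConnection is a rough sketch of the structure described above,
// not the actual type from the repository.
type mConnection struct {
	conn      net.Conn         // underlying TCP connection (file descriptor)
	sendQueue [][]byte         // batch of outgoing messages
	bufReader *bufio.Reader    // read buffer
	bufWriter *bufio.Writer    // write buffer
	channels  map[byte]Reactor // channel ID -> owning reactor
}
```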

@@ -44,7 +44,5 @@ sections.
- [RFC-004: E2E Test Framework Enhancements](./rfc-004-e2e-framework.md)
- [RFC-005: Event System](./rfc-005-event-system.rst)
- [RFC-006: Event Subscription](./rfc-006-event-subscription.md)
- [RFC-007: Deterministic Proto Byte Serialization](./rfc-007-deterministic-proto-bytes.md)
- [RFC-008: Don't Panic](./rfc-008-don't-panic.md)

<!-- - [RFC-NNN: Title](./rfc-NNN-title.md) -->

@@ -1,140 +0,0 @@
# RFC 007 : Deterministic Proto Byte Serialization

## Changelog

- 09-Dec-2021: Initial draft (@williambanfield).

## Abstract

This document discusses the issue of stable byte-representation of serialized messages
within Tendermint and describes a few possible routes that could be taken to address it.

## Background

We use the byte representations of wire-format proto messages to produce
and verify hashes of data within the Tendermint codebase as well as for
producing and verifying cryptographic signatures over these signed bytes.

The protocol buffer [encoding spec][proto-spec-encoding] does not guarantee that the byte representation
of a protocol buffer message will be the same between two calls to an encoder.
While there is a mode to force the encoder to produce the same byte representation
of messages within a single binary, these guarantees are not good enough for our
use case in Tendermint. We require multiple different versions of a binary running
Tendermint to be able to inter-operate. Additionally, we require that multiple different
systems written in _different languages_ be able to participate in different aspects
of the protocols of Tendermint and be able to verify the integrity of the messages
they each produce.
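
As a concrete illustration of that single-binary mode, Go's protobuf runtime exposes a
deterministic marshaling option; note that this is exactly the guarantee called
insufficient above, since it does not hold across runtime versions or languages.

```go
package wiresketch

import "google.golang.org/protobuf/proto"

// marshalDeterministic marshals msg with deterministic map ordering.
// The output is only stable within a single binary and runtime version,
// which is the gap this RFC discusses.
func marshalDeterministic(msg proto.Message) ([]byte, error) {
	return proto.MarshalOptions{Deterministic: true}.Marshal(msg)
}
```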

While this has not yet created a problem that we know of in a running network, we should
make sure to provide stronger guarantees around the serialized representation of the messages
used within the Tendermint consensus algorithm to prevent any issue from occurring.


## Discussion

Proto has the following points of variability that can produce non-deterministic byte representation:

1. Encoding order of fields within a message.

    Proto allows fields to be encoded in any order and even be repeated.

2. Encoding order of elements of a repeated field.

    `repeated` fields in a proto message can be serialized in any order.

3. Presence or absence of default values.

    Types in proto have defined default values similar to Go's zero values.
    Writing or omitting a default value are both legal ways of encoding a wire message.

4. Serialization of 'unknown' fields.

    Unknown fields can be present when a message is created by a binary with a newer
    version of the proto that contains fields that the deserializer in a different
    binary does not yet know about. Deserializers in binaries that do not know about the field
    will maintain the bytes of the unknown field but not place them into the deserialized structure.

We have a few options to consider when producing this stable representation.

### Options for deterministic byte representation

#### Use only compliant serializers and constrain field usage

According to [Cosmos-SDK ADR-27][cosmos-sdk-adr-27], when message types obey a simple
set of rules, gogoproto produces a consistent byte representation of serialized messages.
This seems promising, although more research is needed to guarantee gogoproto always
produces a consistent set of bytes on serialized messages. This would solve the problem
within Tendermint as written in Go, but would require ensuring that there are similar
serializers written in other languages that produce the same output as gogoproto.

#### Reorder serialized bytes to ensure determinism.

The serialized form of a proto message can be transformed into a canonical representation
by applying simple rules to the serialized bytes. Re-ordering the serialized bytes
would allow Tendermint to produce a canonical byte representation without having to
simultaneously maintain a custom proto marshaller.

This could be implemented in many languages as a function that performed the following
operations before producing bytes to sign or hash (a partial sketch in Go follows the list):

1. Does not add any of the data from unknown fields into the type to hash.

    Tendermint should not run into a case where it needs to verify the integrity of
    data with unknown fields for the following reasons:

    The purpose of checking hash equality within Tendermint is to ensure that
    its local copy of data matches the data that the network agreed on. There should
    therefore not be a case where a process is checking hash equality using data that it did not expect
    to receive. What the data represent may be opaque to the process, such as when checking the
    transactions in a block, _but the process will still have expected to receive this data_,
    despite not understanding what their internal structure is. It's not clear what it would
    mean to verify that a block contains data that a process does not know about.

    The same reasoning applies for signature verification within Tendermint. Processes
    verify that a digital signature signed over a set of bytes by locally reconstructing the
    data structure that the digital signature signed using the process's local data.

2. Reordered all message fields to be in tag-sorted order.

    Tag-sorting top-level fields will place all fields of the same tag adjacent
    to each other within the serialized representation.

3. Reordered the contents of all `repeated` fields to be in lexicographically sorted order.

    `repeated` fields will appear in a message as having the same tag but will contain different
    contents. Therefore, lexicographical sorting will produce a stable ordering of
    fields with the same tag.

4. Deleted all default values from the byte representation.

    Encoders can include default values or omit them. Most encoders appear to omit them
    but we may wish to delete them just to be safe.

5. Recursively performed these operations on any length-delimited subfields.

    Length delimited fields may contain messages, strings, or just bytes. However,
    it's not possible to know what data is being represented by such a field.
    A 'string' may happen to have the same structure as an embedded message and we cannot
    disambiguate. For this reason, we must apply these same rules to all subfields that
    may contain messages. Because we cannot know if we have totally mangled the interior 'string'
    or not, this data should never be deserialized or used for anything beyond hashing.
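
A hedged sketch of rules 2 and 3 using Go's `protowire` package is shown below. It
tag-sorts only the top level and omits rule 5's recursion and the unknown-field and
default-value handling, so it is a starting point rather than a complete canonicalizer.

```go
package wiresketch

import (
	"sort"

	"google.golang.org/protobuf/encoding/protowire"
)

// canonicalTopLevel re-orders the top-level fields of an encoded proto
// message into (tag, bytes) sorted order. It sketches only rules 2-3
// above; recursing into length-delimited subfields (rule 5) is omitted.
func canonicalTopLevel(b []byte) ([]byte, error) {
	type field struct {
		num protowire.Number
		raw []byte
	}
	var fields []field
	for len(b) > 0 {
		num, _, n := protowire.ConsumeField(b)
		if n < 0 {
			return nil, protowire.ParseError(n)
		}
		fields = append(fields, field{num: num, raw: b[:n]})
		b = b[n:]
	}
	// Stable sort: first by tag number, then lexicographically by the
	// raw field bytes so repeated fields gain a stable order.
	sort.SliceStable(fields, func(i, j int) bool {
		if fields[i].num != fields[j].num {
			return fields[i].num < fields[j].num
		}
		return string(fields[i].raw) < string(fields[j].raw)
	})
	var out []byte
	for _, f := range fields {
		out = append(out, f.raw...)
	}
	return out, nil
}
```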

A **prototype** implementation by @creachadair of this can be found in [the wirepb repo][wire-pb].
This could be implemented in multiple languages more simply than ensuring that there are
canonical proto serializers that match in each language.

### Future work

We should add clear documentation to the Tendermint codebase everywhere we
compare hashes of proto messages or use proto-serialized bytes to produce a
digital signature, noting that we have been careful to ensure that the hashes
are performed properly.

### References

[proto-spec-encoding]: https://developers.google.com/protocol-buffers/docs/encoding
[spec-issue]: https://github.com/tendermint/tendermint/issues/5005
[cosmos-sdk-adr-27]: https://github.com/cosmos/cosmos-sdk/blob/master/docs/architecture/adr-027-deterministic-protobuf-serialization.md
[cer-proto-3]: https://github.com/regen-network/canonical-proto3
[wire-pb]: https://github.com/creachadair/wirepb

@@ -1,139 +0,0 @@
# RFC 008: Don't Panic

## Changelog

- 2021-12-17: initial draft (@tychoish)

## Abstract

Today, the Tendermint core codebase has panics in a number of cases as
a response to exceptional situations. These panics complicate testing,
and might make Tendermint components difficult to use as a library in
some circumstances. This document outlines a project of converting
panics to errors and describes the situations where it's safe to
panic.

## Background

Panics in Go are a great mechanism for aborting the current execution
for truly exceptional situations (e.g. memory errors, data corruption,
process initialization); however, because they resemble exceptions
in other languages, it can be easy to overuse them in the
implementation of software architectures. This certainly happened in
the history of Tendermint, and as we embark on the project of
stabilizing the package, we find ourselves in the right moment to
reexamine our use of panics, and, more broadly, where panics happen in the
code base.

There are still some situations where panics are acceptable and
desirable, but it's important that Tendermint, as a project, comes to
consensus--perhaps in the text of this document--on the situations
where it is acceptable to panic.

### References

- [Defer Panic and Recover](https://go.dev/blog/defer-panic-and-recover)
- [Why Go gets exceptions right](https://dave.cheney.net/tag/panic)
- [Don't panic](https://dave.cheney.net/practical-go/presentations/gophercon-singapore-2019.html#_dont_panic)

## Discussion

### Acceptable Panics

#### Initialization

It is unambiguously safe (and desirable) to panic in `init()`
functions in response to any kind of error. These errors are caught by
tests, and occur early enough in process initialization that they
won't cause unexpected runtime crashes.

Other code that is called early in process initialization MAY panic,
in some situations if it's not possible to return an error or cause
the process to abort early, although these situations should be
vanishingly rare.

#### Data Corruption

If Tendermint code encounters an inconsistency that could be
attributed to data corruption or a logical impossibility it is safer
to panic and crash the process than continue to attempt to make
progress in these situations.

Examples include reading data out of the storage engine that
is invalid or corrupt, or encountering an ambiguous situation where
the process should halt. Generally these forms of corruption are
detected after interacting with a trusted but external data source,
and reflect situations where the author thinks it's safer to terminate
the process immediately rather than allow execution to continue.

#### Unrecoverable Consensus Failure

In general, a panic should be used in the case of unrecoverable
consensus failures. If a process detects that the network is
behaving in an incoherent way and it does not have a clearly defined
mechanism for recovering, the process should panic.

#### Static Validity

It is acceptable to panic for invariant violations, within a library
or package, in situations that should be statically impossible,
because there is no way to make these kinds of assertions at compile
time.

For example, type-asserting `interface{}` values returned by
`container/list` and `container/heap` (and similar), is acceptable,
because package authors should have exclusive control of the inputs to
these containers. Packages should not expose the ability to add
arbitrary values to these data structures.

#### Controlled Panics Within Libraries

In some algorithms with highly recursive structures or very nested
call patterns, using a panic, in combination with conditional recovery
handlers, results in more manageable code. Ultimately this is a limited
application, and implementations that use panics internally should
only recover conditionally, filtering out panics rather than ignoring
or handling all panics.
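
A minimal sketch of that pattern, with a conditional recover that filters for a
private sentinel type and re-panics on anything else:

```go
package panicsketch

import "fmt"

// parseFailure is a private sentinel carried by controlled panics.
type parseFailure struct{ err error }

func mustStep(ok bool, msg string) {
	if !ok {
		panic(parseFailure{fmt.Errorf("parse: %s", msg)})
	}
}

// Parse converts controlled panics back into errors at the package
// boundary, re-panicking on anything that is not ours.
func Parse(input []byte) (n int, err error) {
	defer func() {
		if r := recover(); r != nil {
			pf, ok := r.(parseFailure)
			if !ok {
				panic(r) // unknown panic: do not swallow it
			}
			err = pf.err
		}
	}()
	mustStep(len(input) > 0, "empty input")
	return len(input), nil
}
```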

#### Request Handling

Code that handles responses to incoming/external requests
(e.g. `http.Handler`) should avoid panics, but in practice this isn't
totally possible, and it makes sense that request handlers have some
kind of default recovery mechanism that will prevent one request from
terminating a service.

### Unacceptable Panics

In **no** other situation is it acceptable for the code to panic:

- there should be **no** controlled panics that callers are required
  to handle across library/package boundaries.
- callers of library functions should not expect panics.
- ensuring that arbitrary go routines can't panic.
- ensuring that there are no arbitrary panics in core production code,
  especially code that can run at any time during the lifetime of a
  process.
- all test code and fixtures should report normal test assertions with
  a mechanism like testify's `require` assertion rather than calling
  panic directly.

The goal of this increased "panic rigor" is to ensure that any escaped
panic reflects a fixable bug in Tendermint.

### Removing Panics

The process for removing panics involves a few steps, and will be part
of an ongoing process of code modernization:

- converting existing explicit panics to errors in cases where it's
  possible to return an error, the errors can and should be handled, and returning
  an error would not lead to data corruption or cover up data
  corruption.

- increase rigor around operations that can cause runtime errors, like
  type assertions, nil pointer errors, array bounds access issues, and
  either avoid these situations or return errors where possible.

- remove generic panic handlers which could cover and hide known
  panics.
@@ -1,128 +0,0 @@
# RFC 009 : Consensus Parameter Upgrade Considerations

## Changelog

- 06-Jan-2022: Initial draft (@williambanfield).

## Abstract

This document discusses the challenges of adding additional consensus parameters
to Tendermint and proposes a few solutions that can enable addition of consensus
parameters in a backwards-compatible way.

## Background

This section provides an overview of the issues of adding consensus parameters
to Tendermint.

### Hash Compatibility

Tendermint produces a hash of a subset of the consensus parameters. The values
that are hashed currently are the `BlockMaxGas` and the `BlockMaxSize`. These
are currently in the [HashedParams struct][hashed-params]. This hash is included
in the block and validators use it to validate that their local view of the consensus
parameters matches what the rest of the network is configured with.
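
For reference, the hashing in [hashed-params] looks roughly like the following sketch
(paraphrased from memory; consult the linked source for the exact code):

```go
package paramsketch

import (
	"github.com/tendermint/tendermint/crypto/tmhash"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// hashConsensusParams sketches how the hashed subset is computed:
// only the block-size parameters are marshaled and hashed.
func hashConsensusParams(maxBytes, maxGas int64) []byte {
	hp := tmproto.HashedParams{
		BlockMaxBytes: maxBytes,
		BlockMaxGas:   maxGas,
	}
	bz, err := hp.Marshal()
	if err != nil {
		panic(err) // marshaling two integers should not fail
	}
	return tmhash.Sum(bz)
}
```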

Any new consensus parameters added to Tendermint should be included in this
hash. This presents a challenge for verification of historical blocks when consensus
parameters are added. If a network produced blocks with a version of Tendermint that
did not yet have the new consensus parameters, the parameter hash it produced will
not reference the new parameters. Any nodes joining the network with the newer
version of Tendermint will have the new consensus parameters. Tendermint will need
to handle this case so that new versions of Tendermint with new consensus parameters
can still validate old blocks correctly without having to do anything overly complex
or hacky.

### Allowing Developer-Defined Values and the `EndBlock` Problem

When new consensus parameters are added, application developers may wish to set
values for them so that the developer-defined values may be used as soon as the
software upgrades. We do not currently have a clean mechanism for handling this.

Consensus parameter updates are communicated from the application to Tendermint
within `EndBlock` of some height `H` and take effect at the next height, `H+1`.
This means that for updates that add a consensus parameter, there is a single
height where the new parameters cannot take effect. The parameters did not exist
in the version of the software that emitted the `EndBlock` response for height `H-1`,
so they cannot take effect at height `H`. The first height that the updated params
can take effect is height `H+1`. As of now, height `H` must run with the defaults.

## Discussion

### Hash Compatibility

This section discusses possible solutions to the problem of maintaining backwards-compatibility
of hashed parameters while adding new parameters.

#### Never Hash Defaults

One solution to the problem of backwards-compatibility is to never include parameters
in the hash if they are using the default value. This means that blocks produced
before the parameters existed will have implicitly been created with the defaults.
This works because any software with newer versions of Tendermint must be using the
defaults for new parameters when validating old blocks since the defaults cannot
have been updated until a height at which the parameters existed.
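
A hedged sketch of that idea follows; `appendNonDefault` is a hypothetical helper, not
repository code. Only parameters that differ from their defaults contribute to the hash
input, so pre-upgrade blocks implicitly hash as if the new parameters were at their defaults.

```go
package paramsketch

import "time"

// appendNonDefault appends a parameter to the hash input only when it
// differs from its default: a hypothetical "never hash defaults" rule.
func appendNonDefault(in []byte, name string, v, def time.Duration) []byte {
	if v == def {
		return in // defaults are implicit, so old blocks stay verifiable
	}
	return append(in, []byte(name+"="+v.String())...)
}
```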

#### Only Update HashedParams on Hash-Breaking Releases

An alternate solution to never hashing defaults is to not update the hashed
parameters on non-hash-breaking releases. This means that when new consensus
parameters are added to Tendermint, there may be a release that makes use of the
parameters but does not verify that they are the same across all validators by
referencing them in the hash. This seems reasonably safe given the fact that
only a very small subset of the consensus parameters is currently verified at all.

#### Version The Consensus Parameter Hash Scheme

The upcoming work on [soft upgrades](https://github.com/tendermint/spec/pull/222)
proposes applying different hashing rules depending on the active block version.
The consensus parameter hash could be versioned in the same way. When different
block versions are used, a different set of consensus parameters will be included
in the hash.

### Developer Defined Values

This section discusses possible solutions to the problem of allowing application
developers to define values for the new parameters during the upgrade that adds
the parameters.

#### Using `InitChain` for New Values

One solution to the problem of allowing application developers to define values
for new consensus parameters is to call the `InitChain` ABCI method on application
startup and fetch the value for any new consensus parameters. The [response object][init-chain-response]
contains a field for `ConsensusParameter` updates so this may serve as a natural place
to put this logic.

This poses a few difficulties. Nodes replaying old blocks while running new
software do not ever call `InitChain` after the initial time. They will therefore
not have a way to determine that the parameters changed at some height by using a
call to `InitChain`. The `EndBlock` response is how parameter changes at a height
are currently communicated to Tendermint and conflating these cases seems risky.

#### Force Defaults For Single Height

An alternate option is to not use `InitChain` and instead require chains to use the
default values of the new parameters for a single height.

As documented in the upcoming [ADR-74][adr-74], popular chains often simply use the default
values. Additionally, great care is being taken to ensure that logic governed by upcoming
consensus parameters is not liveness-breaking. This means that, in the worst case,
chains will experience a single slow height while waiting for the new values to
be applied.

#### Add a new `UpgradeChain` method

An additional method for allowing chains to update consensus parameters that
do not yet exist is to add a new `UpgradeChain` method to `ABCI`. The upgrade chain
method would be called when the chain detects that the version of the block that it
is about to produce does not match the previous block. This method would be called
after `EndBlock` and would return the set of consensus parameters to use at the
next height. It would therefore give an application the chance to set the new
consensus parameters before running a height with these new parameters.

### References

[hashed-params]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/types/params.go#L49
[init-chain-response]: https://github.com/tendermint/tendermint/blob/0ae974e63911804d4a2007bd8a9b3ad81d6d2a90/abci/types/types.pb.go#L1616
[adr-74]: https://github.com/tendermint/tendermint/pull/7503

@@ -18,7 +18,7 @@ This section dives into the internals of Go-Tendermint.
- [Mempool](./mempool/README.md)
- [Light Client](./light-client.md)
- [Consensus](./consensus/README.md)
- [Peer Exchange (PEX)](./pex/README.md)
- [Peer Exachange (PEX)](./pex/README.md)
- [Evidence](./evidence/README.md)

For full specifications refer to the [spec repo](https://github.com/tendermint/spec).

24
go.mod
24
go.mod
@@ -3,7 +3,7 @@ module github.com/tendermint/tendermint
go 1.17

require (
github.com/BurntSushi/toml v1.0.0
github.com/BurntSushi/toml v0.4.1
github.com/adlio/schema v1.2.3
github.com/btcsuite/btcd v0.22.0-beta
github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce
@@ -22,21 +22,21 @@ require (
github.com/mroth/weightedrand v0.4.1
github.com/oasisprotocol/curve25519-voi v0.0.0-20210609091139-0a56a4bca00b
github.com/ory/dockertest v3.3.5+incompatible
github.com/prometheus/client_golang v1.12.0
github.com/prometheus/client_golang v1.11.0
github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0
github.com/rs/cors v1.8.2
github.com/rs/zerolog v1.26.1
github.com/rs/cors v1.8.0
github.com/rs/zerolog v1.26.0
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa
github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa
github.com/spf13/cobra v1.3.0
github.com/spf13/viper v1.10.1
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.10.0
github.com/stretchr/testify v1.7.0
github.com/tendermint/tm-db v0.6.6
github.com/vektra/mockery/v2 v2.9.4
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.43.0
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
google.golang.org/grpc v1.42.0
pgregory.net/rapid v0.4.7
)

@@ -150,12 +150,13 @@ require (
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opencontainers/runc v1.0.3 // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 // indirect
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/polyfloyd/go-errorlint v0.0.0-20210722154253-910bb7978349 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/common v0.30.0 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/quasilyte/go-ruleguard v0.3.13 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20200407221936-30656e2c4a95 // indirect
@@ -189,13 +190,14 @@ require (
github.com/yeya24/promlinter v0.1.0 // indirect
go.etcd.io/bbolt v1.3.6 // indirect
golang.org/x/mod v0.5.0 // indirect
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d // indirect
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.7 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
google.golang.org/protobuf v1.27.1 // indirect
gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
gopkg.in/ini.v1 v1.66.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
53
go.sum
@@ -63,9 +63,8 @@ github.com/Antonboom/nilnil v0.1.0/go.mod h1:PhHLvRPSghY5Y7mX4TW+BHZQYo1A8flE5H2
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/toml v0.4.1 h1:GaI7EiDXDRfa8VshkTj7Fym7ha+y8/XxIgD2okUIjLw=
github.com/BurntSushi/toml v0.4.1/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/toml v1.0.0 h1:dtDWrepsVPfW9H/4y7dDgFc2MBUSeJhlaDtK13CxFlU=
github.com/BurntSushi/toml v1.0.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60=
github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
@@ -217,7 +216,6 @@ github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfc
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
github.com/cpuguy83/go-md2man/v2 v2.0.1/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4=
@@ -294,6 +292,8 @@ github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3n
github.com/fzipp/gocyclo v0.3.1 h1:A9UeX3HJSXTBzvHzhqoYVuE0eAhe+aM8XBCCwsPMZOc=
github.com/fzipp/gocyclo v0.3.1/go.mod h1:DJHO6AUmbdqj2ET4Z9iArSuwWgYDRryYt2wASxc7x3E=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do=
github.com/go-critic/go-critic v0.6.1 h1:lS8B9LH/VVsvQQP7Ao5TJyQqteVKVs3E4dXiHMyubtI=
github.com/go-critic/go-critic v0.6.1/go.mod h1:SdNCfU0yF3UBjtaZGw6586/WocupMOJuiqgom5DsQxM=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -311,6 +311,8 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.5/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
@@ -512,7 +514,6 @@ github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFb
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/api v1.11.0/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M=
github.com/hashicorp/consul/api v1.12.0/go.mod h1:6pVBMo0ebnYdt2S3H87XhekM/HHrUoTD2XXb/VrZVy0=
github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8=
github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms=
github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA=
@@ -637,6 +638,7 @@ github.com/ldez/gomoddirectives v0.2.2 h1:p9/sXuNFArS2RLc+UpYZSI4KQwGMEDWC/LbtF5
github.com/ldez/gomoddirectives v0.2.2/go.mod h1:cpgBogWITnCfRq2qGoDkKMEVSaarhdBr6g8G04uz6d0=
github.com/ldez/tagliatelle v0.2.0 h1:693V8Bf1NdShJ8eu/s84QySA0J2VWBanVBa2WwXD/Wk=
github.com/ldez/tagliatelle v0.2.0/go.mod h1:8s6WJQwEYHbKZDsp/LjArytKOG8qaMrKQQ3mFukHs88=
github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw=
github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
@@ -670,6 +672,7 @@ github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb
github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4=
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84=
github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
@@ -810,6 +813,8 @@ github.com/pelletier/go-toml v1.9.4 h1:tjENF6MfZAg8e4ZmZTeWaWiT2vXtsoO6+iuOjFhEC
github.com/pelletier/go-toml v1.9.4/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/performancecopilot/speed/v4 v4.0.0/go.mod h1:qxrSyuDGrTOWfV+uKRFhfxw6h/4HXRGUiZiufxo49BM=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5 h1:q2e307iGHPdTGp0hoxKjt1H5pDo6utceo3dQVK3I5XQ=
github.com/petermattis/goid v0.0.0-20180202154549-b0b1615b78e5/go.mod h1:jvVRKCrJTQWu0XVbaOlby/2lO20uSCHEMzzplHXte1o=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d h1:CdDQnGF8Nq9ocOS/xlSptM1N3BbrA6/kmaep5ggwaIA=
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw=
github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc=
@@ -831,9 +836,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@@ -845,9 +849,8 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=
github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -877,16 +880,15 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U=
github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU=
github.com/rs/cors v1.8.0 h1:P2KMzcFwrPoSjkF1WLRPsp3UMLyql8L4v9hQpVeK5so=
github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/xid v1.3.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg=
github.com/rs/zerolog v1.18.0/go.mod h1:9nvC1axdVrAHcu/s9taAVfBuIdTZLVQmKQyvrUjF5+I=
github.com/rs/zerolog v1.26.1 h1:/ihwxqH+4z8UxyI70wM1z9yCvkWcfz/a3mj48k/Zngc=
github.com/rs/zerolog v1.26.1/go.mod h1:/wSSJWX7lVrsOwlbyTRSOJvqRlc+WjWlfes+CiJ+tmc=
github.com/rs/zerolog v1.26.0 h1:ORM4ibhEZeTeQlCojCK2kPz1ogAY4bGs4tD+SaAdGaE=
github.com/rs/zerolog v1.26.0/go.mod h1:yBiM87lvSqX8h0Ww4sdzNSkVYZ8dL2xjZJG1lAuGZEo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/ryancurrah/gomodguard v1.2.3 h1:ww2fsjqocGCAFamzvv/b8IsRduuHHeK2MHTcTxZTQX8=
github.com/ryancurrah/gomodguard v1.2.3/go.mod h1:rYbA/4Tg5c54mV1sv4sQTP5WOPBcoLtnBZ7/TEhXAbg=
github.com/ryanrolds/sqlclosecheck v0.3.0 h1:AZx+Bixh8zdUBxUA1NxbxVAS78vTPq4rCb8OUZI9xFw=
@@ -894,9 +896,10 @@ github.com/ryanrolds/sqlclosecheck v0.3.0/go.mod h1:1gREqxyTGR3lVtpngyFo3hZAgk0K
github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts=
github.com/sagikazarmark/crypt v0.1.0/go.mod h1:B/mN0msZuINBtQ1zZLEQcegFJJf9vnYIR88KRMEuODE=
github.com/sagikazarmark/crypt v0.3.0/go.mod h1:uD/D+6UF4SrIR1uGEv7bBNkNqLGqUr43MRiaGWX1Nig=
github.com/sagikazarmark/crypt v0.4.0/go.mod h1:ALv2SRj7GxYV4HO9elxH9nS6M9gW+xDNxqmyJ6RfDFM=
github.com/sanposhiho/wastedassign/v2 v2.0.6 h1:+6/hQIHKNJAUixEj6EmOngGIisyeI+T3335lYTyxRoA=
github.com/sanposhiho/wastedassign/v2 v2.0.6/go.mod h1:KyZ0MWTwxxBmfwn33zh3k1dmsbF2ud9pAAGfoLfjhtI=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa h1:0U2s5loxrTy6/VgfVoLuVLFJcURKLH49ie0zSch7gh4=
github.com/sasha-s/go-deadlock v0.2.1-0.20190427202633-1595213edefa/go.mod h1:F73l+cr82YSh10GxyRI6qZiCgK64VaZjwesgfQ1/iLM=
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/securego/gosec/v2 v2.9.1 h1:anHKLS/ApTYU6NZkKa/5cQqqcbKZURjvc+MtR++S4EQ=
@@ -940,9 +943,8 @@ github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkU
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.2.1 h1:+KmjbUw1hriSNMF55oPrkZcb27aECyrj8V2ytv7kWDw=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/cobra v1.3.0 h1:R7cSvGu+Vv+qX0gW5R/85dx2kmmJT5z5NM8ifdYjdn0=
github.com/spf13/cobra v1.3.0/go.mod h1:BrRVncBjOJa/eUcVVm9CE+oC6as8k+VYr4NY7WCi9V4=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
@@ -955,9 +957,8 @@ github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/y
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/spf13/viper v1.9.0/go.mod h1:+i6ajR7OX2XaiBkrcZJFK21htRk7eDeLg7+O6bhUPP4=
github.com/spf13/viper v1.10.0 h1:mXH0UwHS4D2HwWZa75im4xIQynLfblmWV7qcWpfv0yk=
github.com/spf13/viper v1.10.0/go.mod h1:SoyBPwAtKDzypXNDFKN5kzH7ppppbGZtls1UpIy5AsM=
github.com/spf13/viper v1.10.1 h1:nuJZuYpG7gTj/XqiUwg8bA0cp1+M2mC3J4g5luUYBKk=
github.com/spf13/viper v1.10.1/go.mod h1:IGlFPqhNAPKRxohIzWpI5QEy4kuI7tcl5WvR+8qy1rU=
github.com/ssgreg/nlreturn/v2 v2.2.1 h1:X4XDI7jstt3ySqGU86YGAURbxw3oTDPK9sPEi6YEwQ0=
github.com/ssgreg/nlreturn/v2 v2.2.1/go.mod h1:E/iiPB78hV7Szg2YfRgyIrk1AD6JVMTRkkxBiELzh2I=
github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw=
@@ -1009,7 +1010,9 @@ github.com/tommy-muehle/go-mnd/v2 v2.4.0/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr
github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM=
github.com/tv42/httpunix v0.0.0-20191220191345-2ba4b9c3382c/go.mod h1:hzIxponao9Kjc7aWznkXaL4U4TWaDSs8zcsY4Ka08nM=
github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
github.com/ultraware/whitespace v0.0.4 h1:If7Va4cM03mpgrNH9k49/VOicWpGoG70XPBFFODYDsg=
@@ -1101,9 +1104,8 @@ golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5
golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210915214749-c084706c2272/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e h1:1SzTfNOXwIS2oWiMF+6qu0OUDKb0dauo6MoDUQyu+yU=
golang.org/x/crypto v0.0.0-20211215165025-cf75a172585e/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1258,6 +1260,7 @@ golang.org/x/sys v0.0.0-20190606203320-7fc4e5ec1444/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -1331,10 +1334,8 @@ golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211013075003-97ac67df715c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d h1:FjkYO/PPp4Wi0EAUOVLxePm7qVW4r4ctbWpURyuOD0E=
golang.org/x/sys v0.0.0-20211205182925-97ca703d548d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@@ -1502,7 +1503,6 @@ google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdr
google.golang.org/api v0.59.0/go.mod h1:sT2boj7M9YJxZzgeZqXogmhfmRWDtPzT31xkieUbuZU=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.62.0/go.mod h1:dKmwPCydfsad4qCH08MSdgWjfHOyfpd4VtDGgRFdavw=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -1613,9 +1613,8 @@ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1643,6 +1642,8 @@ gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qS
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o=
gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE=
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
@@ -236,7 +236,7 @@ func (pool *BlockPool) PopRequest() {

if r := pool.requesters[pool.height]; r != nil {
if err := r.Stop(); err != nil {
pool.logger.Error("error stopping requester", "err", err)
pool.logger.Error("Error stopping requester", "err", err)
}
delete(pool.requesters, pool.height)
pool.height++
@@ -418,14 +418,14 @@ func (pool *BlockPool) makeNextRequester(ctx context.Context) {
return
}

request := newBPRequester(pool.logger, pool, nextHeight)
request := newBPRequester(pool, nextHeight)

pool.requesters[nextHeight] = request
atomic.AddInt32(&pool.numPending, 1)

err := request.Start(ctx)
if err != nil {
request.logger.Error("error starting request", "err", err)
request.logger.Error("Error starting request", "err", err)
}
}

@@ -565,7 +565,7 @@ type bpRequester struct {
block *types.Block
}

func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequester {
func newBPRequester(pool *BlockPool, height int64) *bpRequester {
bpr := &bpRequester{
logger: pool.logger,
pool: pool,
@@ -576,7 +576,7 @@ func newBPRequester(logger log.Logger, pool *BlockPool, height int64) *bpRequest
peerID: "",
block: nil,
}
bpr.BaseService = *service.NewBaseService(logger, "bpRequester", bpr)
bpr.BaseService = *service.NewBaseService(nil, "bpRequester", bpr)
return bpr
}

@@ -677,7 +677,7 @@ OUTER_LOOP:
return
case <-bpr.pool.exitedCh:
if err := bpr.Stop(); err != nil {
bpr.logger.Error("error stopped requester", "err", err)
bpr.logger.Error("Error stopped requester", "err", err)
}
return
case peerID := <-bpr.redoCh:
@@ -6,16 +6,15 @@ import (
"fmt"
"runtime/debug"
"sync"
"sync/atomic"
"time"

"github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/p2p"
sm "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmsync "github.com/tendermint/tendermint/libs/sync"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/types"
)
@@ -76,7 +75,7 @@ type Reactor struct {
store *store.BlockStore
pool *BlockPool
consReactor consensusReactor
blockSync *atomicBool
blockSync *tmsync.AtomicBool

blockSyncCh *p2p.Channel
// blockSyncOutBridgeCh defines a channel that acts as a bridge between sending Envelope
@@ -97,27 +96,23 @@ type Reactor struct {
// stopping the p2p Channel(s).
poolWG sync.WaitGroup

metrics *consensus.Metrics
eventBus *eventbus.EventBus
metrics *consensus.Metrics

syncStartTime time.Time
}

// NewReactor returns new reactor instance.
func NewReactor(
ctx context.Context,
logger log.Logger,
state sm.State,
blockExec *sm.BlockExecutor,
store *store.BlockStore,
consReactor consensusReactor,
channelCreator p2p.ChannelCreator,
blockSyncCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
blockSync bool,
metrics *consensus.Metrics,
eventBus *eventbus.EventBus,
) (*Reactor, error) {

if state.LastBlockHeight != store.Height() {
return nil, fmt.Errorf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())
}
@@ -130,11 +125,6 @@ func NewReactor(
requestsCh := make(chan BlockRequest, maxTotalRequesters)
errorsCh := make(chan peerError, maxPeerErrBuffer) // NOTE: The capacity should be larger than the peer count.

blockSyncCh, err := channelCreator(ctx, GetChannelDescriptor())
if err != nil {
return nil, err
}

r := &Reactor{
logger: logger,
initialState: state,
@@ -142,14 +132,13 @@ func NewReactor(
store: store,
pool: NewBlockPool(logger, startHeight, requestsCh, errorsCh),
consReactor: consReactor,
blockSync: newAtomicBool(blockSync),
blockSync: tmsync.NewBool(blockSync),
requestsCh: requestsCh,
errorsCh: errorsCh,
blockSyncCh: blockSyncCh,
blockSyncOutBridgeCh: make(chan p2p.Envelope),
peerUpdates: peerUpdates,
metrics: metrics,
eventBus: eventBus,
syncStartTime: time.Time{},
}

@@ -358,6 +347,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
r.logger.Debug("stopped listening on peer updates channel; closing...")
return
case peerUpdate := <-r.peerUpdates.Updates():
r.processPeerUpdate(peerUpdate)
@@ -523,15 +513,8 @@ FOR_LOOP:
didProcessCh <- struct{}{}
}

firstParts, err := first.MakePartSet(types.BlockPartSizeBytes)
if err != nil {
r.logger.Error("failed to make ",
"height", first.Height,
"err", err.Error())
break FOR_LOOP
}

var (
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
firstPartSetHeader = firstParts.Header()
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
)
@@ -541,7 +524,8 @@ FOR_LOOP:
// NOTE: We can probably make this more efficient, but note that calling
// first.Hash() doesn't verify the tx contents, so MakePartSet() is
// currently necessary.
if err = state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit); err != nil {
err := state.Validators.VerifyCommitLight(chainID, firstID, first.Height, second.LastCommit)
if err != nil {
err = fmt.Errorf("invalid last commit: %w", err)
r.logger.Error(
err.Error(),
@@ -641,32 +625,3 @@ func (r *Reactor) GetRemainingSyncTime() time.Duration {

return time.Duration(int64(remain * float64(time.Second)))
}

func (r *Reactor) PublishStatus(ctx context.Context, event types.EventDataBlockSyncStatus) error {
if r.eventBus == nil {
return errors.New("event bus is not configured")
}
return r.eventBus.PublishEventBlockSyncStatus(ctx, event)
}

// atomicBool is an atomic Boolean, safe for concurrent use by multiple
// goroutines.
type atomicBool int32

// newAtomicBool creates an atomicBool with given initial value.
func newAtomicBool(ok bool) *atomicBool {
ab := new(atomicBool)
if ok {
ab.Set()
}
return ab
}

// Set sets the Boolean to true.
func (ab *atomicBool) Set() { atomic.StoreInt32((*int32)(ab), 1) }

// UnSet sets the Boolean to false.
func (ab *atomicBool) UnSet() { atomic.StoreInt32((*int32)(ab), 0) }

// IsSet returns whether the Boolean is true.
func (ab *atomicBool) IsSet() bool { return atomic.LoadInt32((*int32)(ab))&1 == 1 }
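
The `atomicBool` helper shown in the diff above is small enough to exercise in isolation. Below is a self-contained usage sketch (the helper is reproduced so the snippet compiles on its own; in the reactor it guards the `blockSync` flag, and the surrounding reactor code is elided here):

```go
package main

import (
	"fmt"
	"sync/atomic"
)

// atomicBool mirrors the helper from the diff above: an int32 manipulated
// only through atomic operations, so it is safe for concurrent use.
type atomicBool int32

func newAtomicBool(ok bool) *atomicBool {
	ab := new(atomicBool)
	if ok {
		ab.Set()
	}
	return ab
}

func (ab *atomicBool) Set()        { atomic.StoreInt32((*int32)(ab), 1) }
func (ab *atomicBool) UnSet()      { atomic.StoreInt32((*int32)(ab), 0) }
func (ab *atomicBool) IsSet() bool { return atomic.LoadInt32((*int32)(ab))&1 == 1 }

func main() {
	blockSync := newAtomicBool(true) // the reactor starts out block syncing
	fmt.Println("syncing:", blockSync.IsSet())

	blockSync.UnSet() // flipped once the node catches up and hands off to consensus
	fmt.Println("syncing:", blockSync.IsSet())
}
```
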
@@ -136,7 +136,6 @@ func (rts *reactorTestSuite) addNode(
lastBlock := blockStore.LoadBlock(blockHeight - 1)

vote, err := factory.MakeVote(
ctx,
privVal,
lastBlock.Header.ChainID, 0,
lastBlock.Header.Height, 0, 2,
@@ -144,6 +143,7 @@ func (rts *reactorTestSuite) addNode(
time.Now(),
)
require.NoError(t, err)

lastCommit = types.NewCommit(
vote.Height,
vote.Round,
@@ -152,10 +152,8 @@ func (rts *reactorTestSuite) addNode(
)
}

thisBlock, err := sf.MakeBlock(state, blockHeight, lastCommit)
require.NoError(t, err)
thisParts, err := thisBlock.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

state, err = blockExec.ApplyBlock(ctx, state, blockID, thisBlock)
@@ -167,23 +165,16 @@ func (rts *reactorTestSuite) addNode(
rts.peerChans[nodeID] = make(chan p2p.PeerUpdate)
rts.peerUpdates[nodeID] = p2p.NewPeerUpdates(rts.peerChans[nodeID], 1)
rts.network.Nodes[nodeID].PeerManager.Register(ctx, rts.peerUpdates[nodeID])

chCreator := func(ctx context.Context, chdesc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
return rts.blockSyncChannels[nodeID], nil
}
rts.reactors[nodeID], err = NewReactor(
ctx,
rts.logger.With("nodeID", nodeID),
state.Copy(),
blockExec,
blockStore,
nil,
chCreator,
rts.blockSyncChannels[nodeID],
rts.peerUpdates[nodeID],
rts.blockSync,
consensus.NopMetrics(),
nil, // eventbus, can be nil
)
consensus.NopMetrics())
require.NoError(t, err)

require.NoError(t, rts.reactors[nodeID].Start(ctx))
@@ -207,8 +198,7 @@ func TestReactor_AbruptDisconnect(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)

valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(64)

rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -247,8 +237,7 @@ func TestReactor_SyncTime(t *testing.T) {
require.NoError(t, err)
defer os.RemoveAll(cfg.RootDir)

valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(101)

rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -273,10 +262,10 @@ func TestReactor_NoBlockResponse(t *testing.T) {

cfg, err := config.ResetTestRoot("block_sync_reactor_test")
require.NoError(t, err)

defer os.RemoveAll(cfg.RootDir)

valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)
maxBlockHeight := int64(65)

rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0}, 0)
@@ -328,8 +317,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
defer os.RemoveAll(cfg.RootDir)

maxBlockHeight := int64(48)
valSet, privVals := factory.ValidatorSet(ctx, t, 1, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(cfg, 1, false, 30)

rts := setup(ctx, t, genDoc, privVals[0], []int64{maxBlockHeight, 0, 0, 0, 0}, 1000)

@@ -363,8 +351,7 @@ func TestReactor_BadBlockStopsPeer(t *testing.T) {
//
// XXX: This causes a potential race condition.
// See: https://github.com/tendermint/tendermint/issues/6005
valSet, otherPrivVals := factory.ValidatorSet(ctx, t, 1, 30)
otherGenDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
otherGenDoc, otherPrivVals := factory.RandGenesisDoc(cfg, 1, false, 30)
newNode := rts.network.MakeNode(ctx, t, p2ptest.NodeOptions{
MaxPeers: uint16(len(rts.nodes) + 1),
MaxConnected: uint16(len(rts.nodes) + 1),
@@ -7,7 +7,6 @@ import (
"path"
"sync"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -31,11 +30,7 @@ import (
// Byzantine node sends two different prevotes (nil and blockID) to the same
// validator.
func TestByzantinePrevoteEquivocation(t *testing.T) {
// empirically, this test either passes in <1s or hits some
// kind of deadlock and hits the larger timeout. This timeout
// can be extended a bunch if needed, but it's good to avoid
// falling back to a much coarser timeout
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

config := configSetup(t)
@@ -46,8 +41,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore

valSet, privVals := factory.ValidatorSet(ctx, t, nValidators, 30)
genDoc := factory.GenesisDoc(config, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, 30)
states := make([]*State, nValidators)

for i := 0; i < nValidators; i++ {
@@ -64,8 +58,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

defer os.RemoveAll(thisConfig.RootDir)

ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc(t, logger)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

@@ -74,8 +68,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app)
proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app)
proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
proxyAppConnCon := abciclient.NewLocalClient(mtx, app)

// Make Mempool
mempool := mempool.NewTxMempool(
@@ -98,7 +92,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
cs := NewState(ctx, logger, thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool)
// set private validator
pv := privVals[i]
cs.SetPrivValidator(ctx, pv)
cs.SetPrivValidator(pv)

eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
err = eventBus.Start(ctx)
@@ -131,20 +125,21 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
bzNodeState.doPrevote = func(ctx context.Context, height int64, round int32) {
// allow first height to happen normally so that byzantine validator is no longer proposer
if height == prevoteHeight {
prevote1, err := bzNodeState.signVote(ctx,
prevote1, err := bzNodeState.signVote(
tmproto.PrevoteType,
bzNodeState.ProposalBlock.Hash(),
bzNodeState.ProposalBlockParts.Header(),
)
require.NoError(t, err)

prevote2, err := bzNodeState.signVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
require.NoError(t, err)

// send two votes to all peers (1st to one half, 2nd to another half)
i := 0
for _, ps := range bzReactor.peers {
if i < len(bzReactor.peers)/2 {
bzNodeState.logger.Info("signed and pushed vote", "vote", prevote1, "peer", ps.peerID)
require.NoError(t, bzReactor.voteCh.Send(ctx,
p2p.Envelope{
To: ps.peerID,
@@ -153,6 +148,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
},
}))
} else {
bzNodeState.logger.Info("signed and pushed vote", "vote", prevote2, "peer", ps.peerID)
require.NoError(t, bzReactor.voteCh.Send(ctx,
p2p.Envelope{
To: ps.peerID,
@@ -165,6 +161,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
i++
}
} else {
bzNodeState.logger.Info("behaving normally")
bzNodeState.defaultDoPrevote(ctx, height, round)
}
}
@@ -175,7 +172,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
// lazyProposer := states[1]
lazyNodeState := states[1]

lazyNodeState.decideProposal = func(ctx context.Context, height int64, round int32) {
lazyNodeState.decideProposal = func(height int64, round int32) {
lazyNodeState.logger.Info("Lazy Proposer proposing condensed commit")
require.NotNil(t, lazyNodeState.privValidator)

var commit *types.Commit
@@ -198,25 +196,24 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
if lazyNodeState.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
// miss the opportunity to create a block.
lazyNodeState.logger.Error("enterPropose", "err", errPubKeyIsNotSet)
lazyNodeState.logger.Error(fmt.Sprintf("enterPropose: %v", errPubKeyIsNotSet))
return
}
proposerAddr := lazyNodeState.privValidatorPubKey.Address()

block, blockParts, err := lazyNodeState.blockExec.CreateProposalBlock(
block, blockParts := lazyNodeState.blockExec.CreateProposalBlock(
lazyNodeState.Height, lazyNodeState.state, commit, proposerAddr,
)
require.NoError(t, err)

// Flush the WAL. Otherwise, we may not recompute the same proposal to sign,
// and the privValidator will refuse to sign anything.
if err := lazyNodeState.wal.FlushAndSync(); err != nil {
lazyNodeState.logger.Error("error flushing to disk")
lazyNodeState.logger.Error("Error flushing to disk")
}

// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID, block.Header.Time)
proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID)
p := proposal.ToProto()
if err := lazyNodeState.privValidator.SignProposal(ctx, lazyNodeState.state.ChainID, p); err == nil {
proposal.Signature = p.Signature
@@ -229,6 +226,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
lazyNodeState.Height, lazyNodeState.Round, part,
}, ""})
}
lazyNodeState.logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal)
lazyNodeState.logger.Debug(fmt.Sprintf("Signed proposal block: %v", block))
} else if !lazyNodeState.replayMode {
lazyNodeState.logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err)
}
@@ -256,12 +255,10 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}

msg, err := s.Next(ctx)
assert.NoError(t, err)
if err != nil {
if !assert.NoError(t, err) {
cancel()
return
}

require.NotNil(t, msg)
block := msg.Data().(types.EventDataNewBlock).Block
if len(block.Evidence.Evidence) != 0 {
@@ -280,11 +277,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
require.NoError(t, err)

for idx, ev := range evidenceFromEachValidator {
require.NotNil(t, ev, idx)
ev, ok := ev.(*types.DuplicateVoteEvidence)
require.True(t, ok)
assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
assert.Equal(t, prevoteHeight, ev.Height())
if assert.NotNil(t, ev, idx) {
ev, ok := ev.(*types.DuplicateVoteEvidence)
assert.True(t, ok)
assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
assert.Equal(t, prevoteHeight, ev.Height())
}
}
}
File diff suppressed because it is too large
@@ -11,7 +11,6 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -24,7 +23,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
config := configSetup(t)

n := 4
states, cleanup := makeConsensusState(ctx, t,
states, cleanup := randConsensusState(ctx, t,
config, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)
@@ -108,7 +107,7 @@ func invalidDoPrevoteFunc(
ValidatorIndex: valIndex,
Height: cs.Height,
Round: cs.Round,
Timestamp: tmtime.Now(),
Timestamp: cs.voteTime(),
Type: tmproto.PrecommitType,
BlockID: types.BlockID{
Hash: blockHash,
@@ -124,6 +123,7 @@ func invalidDoPrevoteFunc(
cs.mtx.Unlock()

for _, ps := range r.peers {
cs.logger.Info("sending bad vote", "block", blockHash, "peer", ps.peerID)
require.NoError(t, r.voteCh.Send(ctx, p2p.Envelope{
To: ps.peerID,
Message: &tmcons.Vote{
@@ -22,11 +22,8 @@ import (
)

// for testing
func assertMempool(t *testing.T, txn txNotifier) mempool.Mempool {
t.Helper()
mp, ok := txn.(mempool.Mempool)
require.True(t, ok)
return mp
func assertMempool(txn txNotifier) mempool.Mempool {
return txn.(mempool.Mempool)
}

func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
@@ -40,21 +37,19 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

config.Consensus.CreateEmptyBlocks = false
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(t, cs.txNotifier).EnableTxsAvailable()
state, privVals := randGenesisState(baseConfig, 1, false, 10)
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
startTestRound(ctx, cs, height, round)

ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh)
deliverTxsRange(ctx, t, cs, 0, 1)
ensureNewEventOnChannel(t, newBlockCh) // commit txs
ensureNewEventOnChannel(t, newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(t, newBlockCh)
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh)
deliverTxsRange(ctx, cs, 0, 1)
ensureNewEventOnChannel(newBlockCh) // commit txs
ensureNewEventOnChannel(newBlockCh) // commit updated app hash
ensureNoNewEventOnChannel(newBlockCh)
}

func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
@@ -67,19 +62,17 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
state, privVals := randGenesisState(baseConfig, 1, false, 10)
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())

assertMempool(t, cs.txNotifier).EnableTxsAvailable()
assertMempool(cs.txNotifier).EnableTxsAvailable()

newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
startTestRound(ctx, cs, cs.Height, cs.Round)

ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(t, newBlockCh) // then we dont make a block ...
ensureNewEventOnChannel(t, newBlockCh) // until the CreateEmptyBlocksInterval has passed
ensureNewEventOnChannel(newBlockCh) // first block gets committed
ensureNoNewEventOnChannel(newBlockCh) // then we dont make a block ...
ensureNewEventOnChannel(newBlockCh) // until the CreateEmptyBlocksInterval has passed
}

func TestMempoolProgressInHigherRound(t *testing.T) {
@@ -92,11 +85,9 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
t.Cleanup(func() { _ = os.RemoveAll(config.RootDir) })

config.Consensus.CreateEmptyBlocks = false
state, privVals := makeGenesisState(ctx, t, baseConfig, genesisStateArgs{
Validators: 1,
Power: 10})
cs := newStateWithConfig(ctx, t, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(t, cs.txNotifier).EnableTxsAvailable()
state, privVals := randGenesisState(baseConfig, 1, false, 10)
cs := newStateWithConfig(ctx, log.TestingLogger(), config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock)
newRoundCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound)
@@ -105,35 +96,37 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
if cs.Height == 2 && cs.Round == 0 {
// dont set the proposal in round 0 so we timeout and
// go to next round
cs.logger.Info("Ignoring set proposal at height 2, round 0")
return nil
}
return cs.defaultSetProposal(proposal)
}
startTestRound(ctx, cs, height, round)

ensureNewRound(t, newRoundCh, height, round) // first round at first height
ensureNewEventOnChannel(t, newBlockCh) // first block gets committed
ensureNewRound(newRoundCh, height, round) // first round at first height
ensureNewEventOnChannel(newBlockCh) // first block gets committed

height++ // moving to the next height
round = 0

ensureNewRound(t, newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, t, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(t, timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())
ensureNewRound(newRoundCh, height, round) // first round at next height
deliverTxsRange(ctx, cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round
ensureNewTimeout(timeoutCh, height, round, cs.config.TimeoutPropose.Nanoseconds())

round++ // moving to the next round
ensureNewRound(t, newRoundCh, height, round) // wait for the next round
ensureNewEventOnChannel(t, newBlockCh) // now we can commit the block
round++ // moving to the next round
ensureNewRound(newRoundCh, height, round) // wait for the next round
ensureNewEventOnChannel(newBlockCh) // now we can commit the block
}

func deliverTxsRange(ctx context.Context, t *testing.T, cs *State, start, end int) {
t.Helper()
func deliverTxsRange(ctx context.Context, cs *State, start, end int) {
// Deliver some txs.
for i := start; i < end; i++ {
txBytes := make([]byte, 8)
binary.BigEndian.PutUint64(txBytes, uint64(i))
err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{})
require.NoError(t, err, "error after checkTx")
err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, nil, mempool.TxInfo{})
if err != nil {
panic(fmt.Sprintf("Error after CheckTx: %v", err))
}
}
}

@@ -143,15 +136,12 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {

config := configSetup(t)
logger := log.TestingLogger()
state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{
Validators: 1,
Power: 10})
state, privVals := randGenesisState(config, 1, false, 10)
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())

cs := newStateWithConfigAndBlockStore(
ctx,
t,
logger, config, state, privVals[0], NewCounterApplication(), blockStore)

err := stateStore.Save(state)
@@ -159,7 +149,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
newBlockHeaderCh := subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlockHeader)

const numTxs int64 = 3000
go deliverTxsRange(ctx, t, cs, 0, int(numTxs))
go deliverTxsRange(ctx, cs, 0, int(numTxs))

startTestRound(ctx, cs, cs.Height, cs.Round)
for n := int64(0); n < numTxs; {
@@ -178,13 +168,11 @@ func TestMempoolRmBadTx(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

state, privVals := makeGenesisState(ctx, t, config, genesisStateArgs{
Validators: 1,
Power: 10})
state, privVals := randGenesisState(config, 1, false, 10)
app := NewCounterApplication()
stateStore := sm.NewStore(dbm.NewMemDB())
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(ctx, t, log.TestingLogger(), config, state, privVals[0], app, blockStore)
cs := newStateWithConfigAndBlockStore(ctx, log.TestingLogger(), config, state, privVals[0], app, blockStore)
err := stateStore.Save(state)
require.NoError(t, err)

@@ -204,7 +192,7 @@ func TestMempoolRmBadTx(t *testing.T) {
// Try to send the tx through the mempool.
// CheckTx should not err, but the app should return a bad abci code
// and the tx should get removed from the pool
err := assertMempool(t, cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.Response) {
err := assertMempool(cs.txNotifier).CheckTx(ctx, txBytes, func(r *abci.Response) {
if r.GetCheckTx().Code != code.CodeTypeBadNonce {
t.Errorf("expected checktx to return bad nonce, got %v", r)
return
@@ -212,13 +200,13 @@ func TestMempoolRmBadTx(t *testing.T) {
checkTxRespCh <- struct{}{}
}, mempool.TxInfo{})
if err != nil {
t.Errorf("error after CheckTx: %w", err)
t.Errorf("error after CheckTx: %v", err)
return
}

// check for the tx
for {
txs := assertMempool(t, cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
txs := assertMempool(cs.txNotifier).ReapMaxBytesMaxGas(int64(len(txBytes)), -1)
if len(txs) == 0 {
emptyMempoolCh <- struct{}{}
return
@@ -64,22 +64,6 @@ type Metrics struct {

// Histogram of time taken per step annotated with reason that the step proceeded.
StepTime metrics.Histogram

// QuorumPrevoteMessageDelay is the interval in seconds between the proposal
// timestamp and the timestamp of the earliest prevote that achieved a quorum
// during the prevote step.
//
// To compute it, sum the voting power over each prevote received, in increasing
// order of timestamp. The timestamp of the first prevote to increase the sum to
// above 2/3 of the total voting power of the network defines the endpoint of
// the interval. Subtract the proposal timestamp from this endpoint to obtain
// the quorum delay.
QuorumPrevoteMessageDelay metrics.Gauge
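
The comment above pins the computation down exactly. As a minimal, self-contained sketch of that measurement (the prevote type and helper below are illustrative stand-ins, not code from this diff; only the standard library's sort and time packages are used):

// quorumPrevoteDelay implements the computation described in the comment:
// walk prevotes in increasing timestamp order, accumulate voting power, and
// report the delay of the first prevote that pushes the sum above 2/3 of the
// network's total power. Returns 0 if no quorum was reached this round.
type prevote struct {
	ts    time.Time
	power int64
}

func quorumPrevoteDelay(proposalTime time.Time, votes []prevote, totalPower int64) time.Duration {
	sort.Slice(votes, func(i, j int) bool { return votes[i].ts.Before(votes[j].ts) })
	var sum int64
	for _, v := range votes {
		sum += v.power
		if 3*sum > 2*totalPower { // strictly above 2/3, using integer-exact math
			return v.ts.Sub(proposalTime)
		}
	}
	return 0
}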

// FullPrevoteMessageDelay is the interval in seconds between the proposal
// timestamp and the timestamp of the latest prevote in a round where 100%
// of the voting power on the network issued prevotes.
FullPrevoteMessageDelay metrics.Gauge
}

// PrometheusMetrics returns Metrics built using the Prometheus client library.
@@ -212,20 +196,6 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics {
Name: "step_time",
Help: "Time spent per step.",
}, append(labels, "step", "reason")).With(labelsAndValues...),
QuorumPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "quorum_prevote_message_delay",
Help: "Difference in seconds between the proposal timestamp and the timestamp " +
"of the latest prevote that achieved a quorum in the prevote step.",
}, labels).With(labelsAndValues...),
FullPrevoteMessageDelay: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
Namespace: namespace,
Subsystem: MetricsSubsystem,
Name: "full_prevote_message_delay",
Help: "Difference in seconds between the proposal timestamp and the timestamp " +
"of the latest prevote that achieved 100% of the voting power in the prevote step.",
}, labels).With(labelsAndValues...),
}
}

@@ -249,15 +219,13 @@ func NopMetrics() *Metrics {

BlockIntervalSeconds: discard.NewHistogram(),

NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
QuorumPrevoteMessageDelay: discard.NewGauge(),
FullPrevoteMessageDelay: discard.NewGauge(),
NumTxs: discard.NewGauge(),
BlockSizeBytes: discard.NewHistogram(),
TotalTxs: discard.NewGauge(),
CommittedHeight: discard.NewGauge(),
BlockSyncing: discard.NewGauge(),
StateSyncing: discard.NewGauge(),
BlockParts: discard.NewCounter(),
}
}


@@ -109,7 +109,7 @@ func (m *NewValidBlockMessage) ValidateBasic() error {
return errors.New("negative Round")
}
if err := m.BlockPartSetHeader.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockPartSetHeader: %w", err)
return fmt.Errorf("wrong BlockPartSetHeader: %v", err)
}
if m.BlockParts.Size() == 0 {
return errors.New("empty blockParts")
@@ -191,7 +191,7 @@ func (m *BlockPartMessage) ValidateBasic() error {
return errors.New("negative Round")
}
if err := m.Part.ValidateBasic(); err != nil {
return fmt.Errorf("wrong Part: %w", err)
return fmt.Errorf("wrong Part: %v", err)
}
return nil
}
@@ -266,7 +266,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %w", err)
return fmt.Errorf("wrong BlockID: %v", err)
}

return nil
@@ -296,7 +296,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
return fmt.Errorf("wrong BlockID: %w", err)
return fmt.Errorf("wrong BlockID: %v", err)
}

// NOTE: Votes.Size() can be zero if the node does not have any
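
A side note on these hunks: they consistently trade fmt.Errorf's %w verb for %v. The two are not interchangeable for callers: %w wraps the error so errors.Is and errors.As can still match the underlying cause, while %v flattens it to text. A standalone illustration (not code from this diff):

package main

import (
	"errors"
	"fmt"
)

var errPart = errors.New("bad part")

func main() {
	wrapped := fmt.Errorf("wrong Part: %w", errPart)
	flattened := fmt.Errorf("wrong Part: %v", errPart)

	fmt.Println(errors.Is(wrapped, errPart))   // true: the error chain is preserved
	fmt.Println(errors.Is(flattened, errPart)) // false: only the message survives
}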

@@ -1,7 +1,6 @@
package consensus

import (
"context"
"encoding/hex"
"fmt"
"math"
@@ -25,9 +24,6 @@ import (
)

func TestMsgToProto(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

psh := types.PartSetHeader{
Total: 1,
Hash: tmrand.Bytes(32),
@@ -66,7 +62,7 @@ func TestMsgToProto(t *testing.T) {
pbProposal := proposal.ToProto()

pv := types.NewMockPV()
vote, err := factory.MakeVote(ctx, pv, factory.DefaultTestChainID,
vote, err := factory.MakeVote(pv, factory.DefaultTestChainID,
0, 1, 0, 2, types.BlockID{}, time.Now())
require.NoError(t, err)
pbVote := vote.ToProto()
@@ -1,489 +0,0 @@
package consensus

import (
"bytes"
"context"
"testing"
"time"

"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/internal/eventbus"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
"github.com/tendermint/tendermint/libs/log"
tmtimemocks "github.com/tendermint/tendermint/libs/time/mocks"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

const (
// blockTimeIota is used in the test harness as the time between
// blocks when not otherwise specified.
blockTimeIota = time.Millisecond
)

// pbtsTestHarness constructs a Tendermint network that can be used for testing the
// implementation of the Proposer-Based timestamps algorithm.
// It runs a series of consensus heights and captures timing of votes and events.
type pbtsTestHarness struct {
// configuration options set by the user of the test harness.
pbtsTestConfiguration

// The Tendermint consensus state machine being run during
// a run of the pbtsTestHarness.
observedState *State

// A stub for signing votes and messages using the key
// from the observedState.
observedValidator *validatorStub

// A list of simulated validators that interact with the observedState and are
// fully controlled by the test harness.
otherValidators []*validatorStub

// The mock time source used by all of the validator stubs in the test harness.
// This mock clock allows the test harness to produce votes and blocks with arbitrary
// timestamps.
validatorClock *tmtimemocks.Source

chainID string

// channels for verifying that the observed validator completes certain actions.
ensureProposalCh, roundCh, blockCh, ensureVoteCh <-chan tmpubsub.Message

// channel of events from the observed validator annotated with the timestamp
// the event was received.
eventCh <-chan timestampedEvent

currentHeight int64
currentRound int32
}

type pbtsTestConfiguration struct {
// The timestamp consensus parameters to be used by the state machine under test.
synchronyParams types.SynchronyParams

// The setting to use for the TimeoutPropose configuration parameter.
timeoutPropose time.Duration

// The timestamp of the first block produced by the network.
genesisTime time.Time

// The time at which the proposal at height 2 should be delivered.
height2ProposalDeliverTime time.Time

// The timestamp of the block proposed at height 2.
height2ProposedBlockTime time.Time

// The timestamp of the block proposed at height 4.
// At height 4, the proposed block time and the deliver time are the same so
// that timeliness does not affect height 4.
height4ProposedBlockTime time.Time
}

func newPBTSTestHarness(ctx context.Context, t *testing.T, tc pbtsTestConfiguration) pbtsTestHarness {
t.Helper()
const validators = 4
cfg := configSetup(t)
clock := new(tmtimemocks.Source)
if tc.height4ProposedBlockTime.IsZero() {

// Set a default height4ProposedBlockTime.
// Use a proposed block time that is greater than the time that the
// block at height 2 was delivered. Height 3 is not relevant for testing
// and always occurs blockTimeIota before height 4. If not otherwise specified,
// height 4 therefore occurs 2*blockTimeIota after height 2.
tc.height4ProposedBlockTime = tc.height2ProposalDeliverTime.Add(2 * blockTimeIota)
}
cfg.Consensus.TimeoutPropose = tc.timeoutPropose
consensusParams := types.DefaultConsensusParams()
consensusParams.Synchrony = tc.synchronyParams

state, privVals := makeGenesisState(ctx, t, cfg, genesisStateArgs{
Params: consensusParams,
Time: tc.genesisTime,
Validators: validators,
})
cs := newState(ctx, t, log.TestingLogger(), state, privVals[0], kvstore.NewApplication())
vss := make([]*validatorStub, validators)
for i := 0; i < validators; i++ {
vss[i] = newValidatorStub(privVals[i], int32(i))
}
incrementHeight(vss[1:]...)

for _, vs := range vss {
vs.clock = clock
}
pubKey, err := vss[0].PrivValidator.GetPubKey(ctx)
require.NoError(t, err)

eventCh := timestampedCollector(ctx, t, cs.eventBus)

return pbtsTestHarness{
pbtsTestConfiguration: tc,
observedValidator: vss[0],
observedState: cs,
otherValidators: vss[1:],
validatorClock: clock,
currentHeight: 1,
chainID: cfg.ChainID(),
roundCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewRound),
ensureProposalCh: subscribe(ctx, t, cs.eventBus, types.EventQueryCompleteProposal),
blockCh: subscribe(ctx, t, cs.eventBus, types.EventQueryNewBlock),
ensureVoteCh: subscribeToVoterBuffered(ctx, t, cs, pubKey.Address()),
eventCh: eventCh,
}
}
func (p *pbtsTestHarness) observedValidatorProposerHeight(ctx context.Context, t *testing.T, previousBlockTime time.Time) heightResult {
p.validatorClock.On("Now").Return(p.height2ProposedBlockTime).Times(6)

ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound)

timeout := time.Until(previousBlockTime.Add(ensureTimeout))
ensureProposalWithTimeout(t, p.ensureProposalCh, p.currentHeight, p.currentRound, nil, timeout)

rs := p.observedState.GetRoundState()
bid := types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()}
ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound)
signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...)

signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...)
ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound)

ensureNewBlock(t, p.blockCh, p.currentHeight)

vk, err := p.observedValidator.GetPubKey(ctx)
require.NoError(t, err)
res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, vk.Address())

p.currentHeight++
incrementHeight(p.otherValidators...)
return res
}

func (p *pbtsTestHarness) height2(ctx context.Context, t *testing.T) heightResult {
signer := p.otherValidators[0].PrivValidator
height3BlockTime := p.height2ProposedBlockTime.Add(-blockTimeIota)
return p.nextHeight(ctx, t, signer, p.height2ProposalDeliverTime, p.height2ProposedBlockTime, height3BlockTime)
}

func (p *pbtsTestHarness) intermediateHeights(ctx context.Context, t *testing.T) {
signer := p.otherValidators[1].PrivValidator
blockTimeHeight3 := p.height4ProposedBlockTime.Add(-blockTimeIota)
p.nextHeight(ctx, t, signer, blockTimeHeight3, blockTimeHeight3, p.height4ProposedBlockTime)

signer = p.otherValidators[2].PrivValidator
p.nextHeight(ctx, t, signer, p.height4ProposedBlockTime, p.height4ProposedBlockTime, time.Now())
}

func (p *pbtsTestHarness) height5(ctx context.Context, t *testing.T) heightResult {
return p.observedValidatorProposerHeight(ctx, t, p.height4ProposedBlockTime)
}

func (p *pbtsTestHarness) nextHeight(ctx context.Context, t *testing.T, proposer types.PrivValidator, deliverTime, proposedTime, nextProposedTime time.Time) heightResult {
p.validatorClock.On("Now").Return(nextProposedTime).Times(6)

ensureNewRound(t, p.roundCh, p.currentHeight, p.currentRound)

b, _, err := p.observedState.createProposalBlock()
require.NoError(t, err)
b.Height = p.currentHeight
b.Header.Height = p.currentHeight
b.Header.Time = proposedTime

k, err := proposer.GetPubKey(ctx)
require.NoError(t, err)
b.Header.ProposerAddress = k.Address()
ps, err := b.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
bid := types.BlockID{Hash: b.Hash(), PartSetHeader: ps.Header()}
prop := types.NewProposal(p.currentHeight, 0, -1, bid, proposedTime)
tp := prop.ToProto()

if err := proposer.SignProposal(ctx, p.observedState.state.ChainID, tp); err != nil {
t.Fatalf("error signing proposal: %s", err)
}

time.Sleep(time.Until(deliverTime))
prop.Signature = tp.Signature
if err := p.observedState.SetProposalAndBlock(ctx, prop, b, ps, "peerID"); err != nil {
t.Fatal(err)
}
ensureProposal(t, p.ensureProposalCh, p.currentHeight, 0, bid)

ensurePrevote(t, p.ensureVoteCh, p.currentHeight, p.currentRound)
signAddVotes(ctx, t, p.observedState, tmproto.PrevoteType, p.chainID, bid, p.otherValidators...)

signAddVotes(ctx, t, p.observedState, tmproto.PrecommitType, p.chainID, bid, p.otherValidators...)
ensurePrecommit(t, p.ensureVoteCh, p.currentHeight, p.currentRound)

vk, err := p.observedValidator.GetPubKey(ctx)
require.NoError(t, err)
res := collectHeightResults(ctx, t, p.eventCh, p.currentHeight, vk.Address())
ensureNewBlock(t, p.blockCh, p.currentHeight)

p.currentHeight++
incrementHeight(p.otherValidators...)
return res
}

func timestampedCollector(ctx context.Context, t *testing.T, eb *eventbus.EventBus) <-chan timestampedEvent {
t.Helper()

// Since eventCh is not read until the end of each height, it must be large
// enough to hold all of the events produced during a single height.
eventCh := make(chan timestampedEvent, 100)

if err := eb.Observe(ctx, func(msg tmpubsub.Message) error {
eventCh <- timestampedEvent{
ts: time.Now(),
m: msg,
}
return nil
}, types.EventQueryVote, types.EventQueryCompleteProposal); err != nil {
t.Fatalf("Failed to observe query %v: %v", types.EventQueryVote, err)
}
return eventCh
}

func collectHeightResults(ctx context.Context, t *testing.T, eventCh <-chan timestampedEvent, height int64, address []byte) heightResult {
t.Helper()
var res heightResult
for event := range eventCh {
switch v := event.m.Data().(type) {
case types.EventDataVote:
if v.Vote.Height > height {
t.Fatalf("received prevote from unexpected height, expected: %d, saw: %d", height, v.Vote.Height)
}
if !bytes.Equal(address, v.Vote.ValidatorAddress) {
continue
}
if v.Vote.Type != tmproto.PrevoteType {
continue
}
res.prevote = v.Vote
res.prevoteIssuedAt = event.ts

case types.EventDataCompleteProposal:
if v.Height > height {
t.Fatalf("received proposal from unexpected height, expected: %d, saw: %d", height, v.Height)
}
res.proposalIssuedAt = event.ts
}
if res.isComplete() {
return res
}
}
t.Fatalf("complete height result never seen for height %d", height)

panic("unreachable")
}

type timestampedEvent struct {
ts time.Time
m tmpubsub.Message
}

func (p *pbtsTestHarness) run(ctx context.Context, t *testing.T) resultSet {
startTestRound(ctx, p.observedState, p.currentHeight, p.currentRound)

r1 := p.observedValidatorProposerHeight(ctx, t, p.genesisTime)
r2 := p.height2(ctx, t)
p.intermediateHeights(ctx, t)
r5 := p.height5(ctx, t)
return resultSet{
genesisHeight: r1,
height2: r2,
height5: r5,
}
}

type resultSet struct {
genesisHeight heightResult
height2 heightResult
height5 heightResult
}

type heightResult struct {
proposalIssuedAt time.Time
prevote *types.Vote
prevoteIssuedAt time.Time
}

func (hr heightResult) isComplete() bool {
return !hr.proposalIssuedAt.IsZero() && !hr.prevoteIssuedAt.IsZero() && hr.prevote != nil
}
// TestProposerWaitsForGenesisTime tests that a proposer will not propose a block
// until after the genesis time has passed. The test sets the genesis time in the
// future and then ensures that the observed validator waits to propose a block.
func TestProposerWaitsForGenesisTime(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

// create a genesis time far (enough) in the future.
initialTime := time.Now().Add(800 * time.Millisecond)
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 10 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 10 * time.Millisecond,
genesisTime: initialTime,
height2ProposalDeliverTime: initialTime.Add(10 * time.Millisecond),
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
}

pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run(ctx, t)

// ensure that the proposal was issued after the genesis time.
assert.True(t, results.genesisHeight.proposalIssuedAt.After(cfg.genesisTime))
}

// TestProposerWaitsForPreviousBlock tests that the proposer of a block waits until
// the block time of the previous height has passed to propose the next block.
// The test harness ensures that the observed validator will be the proposer at
// height 1 and height 5. The test sets the block time of height 4 in the future
// and then verifies that the observed validator waits until after the block time
// of height 4 to propose a block at height 5.
func TestProposerWaitsForPreviousBlock(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
initialTime := time.Now().Add(time.Millisecond * 50)
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 100 * time.Millisecond,
MessageDelay: 500 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposalDeliverTime: initialTime.Add(150 * time.Millisecond),
height2ProposedBlockTime: initialTime.Add(100 * time.Millisecond),
height4ProposedBlockTime: initialTime.Add(800 * time.Millisecond),
}

pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run(ctx, t)

// the observed validator is the proposer at height 5.
// ensure that the observed validator did not propose a block until after
// the time configured for height 4.
assert.True(t, results.height5.proposalIssuedAt.After(cfg.height4ProposedBlockTime))

// Ensure that the validator issued a prevote for a non-nil block.
assert.NotNil(t, results.height5.prevote.BlockID.Hash)
}

func TestProposerWaitTime(t *testing.T) {
genesisTime, err := time.Parse(time.RFC3339, "2019-03-13T23:00:00Z")
require.NoError(t, err)
testCases := []struct {
name string
previousBlockTime time.Time
localTime time.Time
expectedWait time.Duration
}{
{
name: "block time greater than local time",
previousBlockTime: genesisTime.Add(5 * time.Nanosecond),
localTime: genesisTime.Add(1 * time.Nanosecond),
expectedWait: 4 * time.Nanosecond,
},
{
name: "local time greater than block time",
previousBlockTime: genesisTime.Add(1 * time.Nanosecond),
localTime: genesisTime.Add(5 * time.Nanosecond),
expectedWait: 0,
},
{
name: "both times equal",
previousBlockTime: genesisTime.Add(5 * time.Nanosecond),
localTime: genesisTime.Add(5 * time.Nanosecond),
expectedWait: 0,
},
}
for _, testCase := range testCases {
t.Run(testCase.name, func(t *testing.T) {
mockSource := new(tmtimemocks.Source)
mockSource.On("Now").Return(testCase.localTime)

ti := proposerWaitTime(mockSource, testCase.previousBlockTime)
assert.Equal(t, testCase.expectedWait, ti)
})
}
}
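
The test table above fully determines proposerWaitTime's contract: the proposer sleeps for exactly the gap between the previous block time and the local clock, and never waits when the local clock is already at or past that time. A sketch consistent with those three cases (assuming a time source exposing Now(), as the mocked tmtimemocks.Source above suggests; this is a reconstruction, not code from this diff):

// proposerWaitTime returns how long the proposer should wait before proposing:
// the remaining gap to the previous block's timestamp, or zero if it has passed.
func proposerWaitTime(lt tmtime.Source, previousBlockTime time.Time) time.Duration {
	localTime := lt.Now()
	if previousBlockTime.After(localTime) {
		return previousBlockTime.Sub(localTime)
	}
	return 0
}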
func TestTimelyProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

initialTime := time.Now()

cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 10 * time.Millisecond,
MessageDelay: 140 * time.Millisecond,
},
timeoutPropose: 40 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(30 * time.Millisecond),
}

pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run(ctx, t)
require.NotNil(t, results.height2.prevote.BlockID.Hash)
}

func TestTooFarInThePastProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

initialTime := time.Now()

// localtime > proposedBlockTime + MsgDelay + Precision
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 1 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(10 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(21 * time.Millisecond),
}

pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run(ctx, t)

require.Nil(t, results.height2.prevote.BlockID.Hash)
}
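
The inline conditions here and in the next test are the two failure directions of the proposer-based timestamps "timely" check. Read together with the SynchronyParams in these configurations, the predicate being exercised is, in sketch form (a hedged reconstruction from the comments, not code taken from this diff):

// isTimely: a proposal received at recvTime for a block stamped blockTime is
// timely iff recvTime lies in [blockTime - Precision, blockTime + MessageDelay + Precision].
func isTimely(recvTime, blockTime time.Time, sp types.SynchronyParams) bool {
	lhs := blockTime.Add(-sp.Precision)                  // earlier than this: too far in the future
	rhs := blockTime.Add(sp.MessageDelay + sp.Precision) // later than this: too far in the past
	return !recvTime.Before(lhs) && !recvTime.After(rhs)
}

TestTooFarInThePastProposal delivers the height-2 proposal at the edge of blockTime + MessageDelay + Precision (10ms + 10ms + 1ms), so by the time it is processed the bound is exceeded and the prevote is nil; the next test drives the receive time below the lower bound instead.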
func TestTooFarInTheFutureProposal(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

initialTime := time.Now()

// localtime < proposedBlockTime - Precision
cfg := pbtsTestConfiguration{
synchronyParams: types.SynchronyParams{
Precision: 1 * time.Millisecond,
MessageDelay: 10 * time.Millisecond,
},
timeoutPropose: 50 * time.Millisecond,
genesisTime: initialTime,
height2ProposedBlockTime: initialTime.Add(100 * time.Millisecond),
height2ProposalDeliverTime: initialTime.Add(10 * time.Millisecond),
height4ProposedBlockTime: initialTime.Add(150 * time.Millisecond),
}

pbtsTest := newPBTSTestHarness(ctx, t, cfg)
results := pbtsTest.run(ctx, t)

require.Nil(t, results.height2.prevote.BlockID.Hash)
}
@@ -23,8 +23,8 @@ var (

// peerStateStats holds internal statistics for a peer.
type peerStateStats struct {
Votes int `json:"votes,string"`
BlockParts int `json:"block_parts,string"`
Votes int `json:"votes"`
BlockParts int `json:"block_parts"`
}
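
This hunk changes only the JSON encoding, not the semantics: with the ,string option, encoding/json marshals the int fields as quoted strings (useful for clients that cannot represent 64-bit integers exactly); without it they become bare JSON numbers. A standalone illustration (not code from this diff):

package main

import (
	"encoding/json"
	"fmt"
)

type tagged struct {
	Votes int `json:"votes,string"`
}

type plain struct {
	Votes int `json:"votes"`
}

func main() {
	a, _ := json.Marshal(tagged{Votes: 42})
	b, _ := json.Marshal(plain{Votes: 42})
	fmt.Println(string(a)) // {"votes":"42"}
	fmt.Println(string(b)) // {"votes":42}
}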

func (pss peerStateStats) String() string {

@@ -28,9 +28,9 @@ var (

// GetChannelDescriptor produces an instance of a descriptor for this
// package's required channels.
func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor {
return map[p2p.ChannelID]*p2p.ChannelDescriptor{
StateChannel: {
func GetChannelDescriptors() []*p2p.ChannelDescriptor {
return []*p2p.ChannelDescriptor{
{
ID: StateChannel,
MessageType: new(tmcons.Message),
Priority: 8,
@@ -38,7 +38,7 @@ func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor {
RecvMessageCapacity: maxMsgSize,
RecvBufferCapacity: 128,
},
DataChannel: {
{
// TODO: Consider a split between gossiping current block and catchup
// stuff. Once we gossip the whole block there is nothing left to send
// until next height or round.
@@ -49,7 +49,7 @@ func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor {
RecvBufferCapacity: 512,
RecvMessageCapacity: maxMsgSize,
},
VoteChannel: {
{
ID: VoteChannel,
MessageType: new(tmcons.Message),
Priority: 10,
@@ -57,7 +57,7 @@ func getChannelDescriptors() map[p2p.ChannelID]*p2p.ChannelDescriptor {
RecvBufferCapacity: 128,
RecvMessageCapacity: maxMsgSize,
},
VoteSetBitsChannel: {
{
ID: VoteSetBitsChannel,
MessageType: new(tmcons.Message),
Priority: 5,
@@ -82,6 +82,8 @@ const (
listenerIDConsensus = "consensus-reactor"
)

type ReactorOption func(*Reactor)

// NOTE: Temporary interface for switching to block sync, we should get rid of v0.
// See: https://github.com/tendermint/tendermint/issues/4595
type BlockSyncReactor interface {
@@ -131,40 +133,23 @@ type Reactor struct {
// to relevant p2p Channels and a channel to listen for peer updates on. The
// reactor will close all p2p Channels when stopping.
func NewReactor(
ctx context.Context,
logger log.Logger,
cs *State,
channelCreator p2p.ChannelCreator,
stateCh *p2p.Channel,
dataCh *p2p.Channel,
voteCh *p2p.Channel,
voteSetBitsCh *p2p.Channel,
peerUpdates *p2p.PeerUpdates,
waitSync bool,
metrics *Metrics,
) (*Reactor, error) {
chans := getChannelDescriptors()
stateCh, err := channelCreator(ctx, chans[StateChannel])
if err != nil {
return nil, err
}
options ...ReactorOption,
) *Reactor {

dataCh, err := channelCreator(ctx, chans[DataChannel])
if err != nil {
return nil, err
}

voteCh, err := channelCreator(ctx, chans[VoteChannel])
if err != nil {
return nil, err
}

voteSetBitsCh, err := channelCreator(ctx, chans[VoteSetBitsChannel])
if err != nil {
return nil, err
}
r := &Reactor{
logger: logger,
state: cs,
waitSync: waitSync,
peers: make(map[types.NodeID]*PeerState),
Metrics: metrics,
Metrics: NopMetrics(),
stateCh: stateCh,
dataCh: dataCh,
voteCh: voteCh,
@@ -173,7 +158,11 @@ func NewReactor(
}
r.BaseService = *service.NewBaseService(logger, "Consensus", r)

return r, nil
for _, opt := range options {
opt(r)
}

return r
}

// OnStart starts separate go routines for each p2p Channel and listens for
@@ -247,6 +236,11 @@ func (r *Reactor) WaitSync() bool {
return r.waitSync
}

// ReactorMetrics sets the reactor's metrics as an option function.
func ReactorMetrics(metrics *Metrics) ReactorOption {
return func(r *Reactor) { r.Metrics = metrics }
}
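
ReactorOption and ReactorMetrics together are the standard Go functional-options pattern: the constructor builds the value with safe defaults (Metrics: NopMetrics()) and then applies each option, so optional dependencies are overridden only when a caller asks, and the signature stays stable as knobs accumulate. A minimal self-contained demonstration of the same pattern with a hypothetical Server type (illustrative only, not from this diff):

package main

import "fmt"

type Server struct {
	metrics string
}

type Option func(*Server)

// WithMetrics mirrors ReactorMetrics: it overrides the default dependency.
func WithMetrics(m string) Option {
	return func(s *Server) { s.metrics = m }
}

func NewServer(options ...Option) *Server {
	s := &Server{metrics: "nop"} // safe default, like NopMetrics()
	for _, opt := range options {
		opt(s)
	}
	return s
}

func main() {
	fmt.Println(NewServer().metrics)                          // nop
	fmt.Println(NewServer(WithMetrics("prometheus")).metrics) // prometheus
}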

// SwitchToConsensus switches from block-sync mode to consensus mode. It resets
// the state, turns off block-sync, and starts the consensus state-machine.
func (r *Reactor) SwitchToConsensus(ctx context.Context, state sm.State, skipWAL bool) {
@@ -1455,6 +1449,7 @@ func (r *Reactor) processPeerUpdates(ctx context.Context) {
for {
select {
case <-ctx.Done():
r.logger.Debug("stopped listening on peer updates channel; closing...")
return
case peerUpdate := <-r.peerUpdates.Updates():
r.processPeerUpdate(ctx, peerUpdate)

@@ -24,12 +24,12 @@ import (
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
sm "github.com/tendermint/tendermint/internal/state"
statemocks "github.com/tendermint/tendermint/internal/state/mocks"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
"github.com/tendermint/tendermint/types"
)
@@ -81,38 +81,22 @@ func setup(
rts.voteSetBitsChannels = rts.network.MakeChannelsNoCleanup(ctx, t, chDesc(VoteSetBitsChannel, size))

ctx, cancel := context.WithCancel(ctx)
t.Cleanup(cancel)

chCreator := func(nodeID types.NodeID) p2p.ChannelCreator {
return func(ctx context.Context, desc *p2p.ChannelDescriptor) (*p2p.Channel, error) {
switch desc.ID {
case StateChannel:
return rts.stateChannels[nodeID], nil
case DataChannel:
return rts.dataChannels[nodeID], nil
case VoteChannel:
return rts.voteChannels[nodeID], nil
case VoteSetBitsChannel:
return rts.voteSetBitsChannels[nodeID], nil
default:
return nil, fmt.Errorf("invalid channel; %v", desc.ID)
}
}
}
// Canceled during cleanup (see below).

i := 0
for nodeID, node := range rts.network.Nodes {
state := states[i]

reactor, err := NewReactor(ctx,
reactor := NewReactor(
state.logger.With("node", nodeID),
state,
chCreator(nodeID),
rts.stateChannels[nodeID],
rts.dataChannels[nodeID],
rts.voteChannels[nodeID],
rts.voteSetBitsChannels[nodeID],
node.MakePeerUpdates(ctx, t),
true,
NopMetrics(),
)
require.NoError(t, err)

reactor.SetEventBus(state.eventBus)

@@ -142,7 +126,6 @@ func setup(

require.NoError(t, reactor.Start(ctx))
require.True(t, reactor.IsRunning())
t.Cleanup(reactor.Wait)

i++
}
@@ -152,7 +135,10 @@ func setup(
// start the in-memory network and connect all peers with each other
rts.network.Start(ctx, t)

t.Cleanup(leaktest.Check(t))
t.Cleanup(func() {
cancel()
leaktest.Check(t)
})

return rts
}
@@ -198,7 +184,7 @@ func waitForAndValidateBlock(
require.NoError(t, validateBlock(newBlock, activeVals))

for _, tx := range txs {
require.NoError(t, assertMempool(t, states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}))
require.NoError(t, assertMempool(states[j].txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}))
}
}

@@ -324,7 +310,7 @@ func TestReactorBasic(t *testing.T) {
cfg := configSetup(t)

n := 4
states, cleanup := makeConsensusState(ctx, t,
states, cleanup := randConsensusState(ctx, t,
cfg, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)
@@ -381,8 +367,7 @@ func TestReactorWithEvidence(t *testing.T) {
tickerFunc := newMockTickerFunc(true)
appFunc := newKVStore

valSet, privVals := factory.ValidatorSet(ctx, t, n, 30)
genDoc := factory.GenesisDoc(cfg, time.Now(), valSet.Validators, nil)
genDoc, privVals := factory.RandGenesisDoc(cfg, n, false, 30)
states := make([]*State, n)
logger := consensusLogger()

@@ -396,8 +381,8 @@ func TestReactorWithEvidence(t *testing.T) {

defer os.RemoveAll(thisConfig.RootDir)

ensureDir(t, path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc(t, logger)
ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

@@ -407,8 +392,8 @@ func TestReactorWithEvidence(t *testing.T) {

// one for mempool, one for consensus
mtx := new(sync.Mutex)
proxyAppConnMem := abciclient.NewLocalClient(logger, mtx, app)
proxyAppConnCon := abciclient.NewLocalClient(logger, mtx, app)
proxyAppConnMem := abciclient.NewLocalClient(mtx, app)
proxyAppConnCon := abciclient.NewLocalClient(mtx, app)

mempool := mempool.NewTxMempool(
log.TestingLogger().With("module", "mempool"),
@@ -425,8 +410,7 @@ func TestReactorWithEvidence(t *testing.T) {
// everyone includes evidence of another double signing
vIdx := (i + 1) % n

ev, err := types.NewMockDuplicateVoteEvidenceWithValidator(ctx, 1, defaultTestTime, privVals[vIdx], cfg.ChainID())
require.NoError(t, err)
ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], cfg.ChainID())
evpool := &statemocks.EvidencePool{}
evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
@@ -438,7 +422,7 @@ func TestReactorWithEvidence(t *testing.T) {
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyAppConnCon, mempool, evpool, blockStore)
cs := NewState(ctx, logger.With("validator", i, "module", "consensus"),
thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool2)
cs.SetPrivValidator(ctx, pv)
cs.SetPrivValidator(pv)

eventBus := eventbus.NewDefault(log.TestingLogger().With("module", "events"))
require.NoError(t, eventBus.Start(ctx))
@@ -485,7 +469,8 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
cfg := configSetup(t)

n := 4
states, cleanup := makeConsensusState(ctx,
states, cleanup := randConsensusState(
ctx,
t,
cfg,
n,
@@ -509,7 +494,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
// send a tx
require.NoError(
t,
assertMempool(t, states[3].txNotifier).CheckTx(
assertMempool(states[3].txNotifier).CheckTx(
ctx,
[]byte{1, 2, 3},
nil,
@@ -541,7 +526,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
cfg := configSetup(t)

n := 4
states, cleanup := makeConsensusState(ctx, t,
states, cleanup := randConsensusState(ctx, t,
cfg, n, "consensus_reactor_test",
newMockTickerFunc(true), newKVStore)
t.Cleanup(cleanup)
@@ -606,7 +591,8 @@ func TestReactorVotingPowerChange(t *testing.T) {
cfg := configSetup(t)

n := 4
states, cleanup := makeConsensusState(ctx,
states, cleanup := randConsensusState(
ctx,
t,
cfg,
n,
@@ -716,7 +702,6 @@ func TestReactorValidatorSetChanges(t *testing.T) {
nVals := 4
states, _, _, cleanup := randConsensusNetWithPeers(
ctx,
t,
cfg,
nVals,
nPeers,
@@ -729,7 +714,6 @@ func TestReactorValidatorSetChanges(t *testing.T) {
rts := setup(ctx, t, nPeers, states, 100) // buffer must be large enough to not deadlock

for _, reactor := range rts.reactors {
reactor.state.logger = log.NewTestingLogger(t)
state := reactor.state.GetState()
reactor.SwitchToConsensus(ctx, state, false)
}
@@ -737,7 +721,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
// map of active validators
activeVals := make(map[string]struct{})
for i := 0; i < nVals; i++ {
pubKey, err := states[i].privValidator.GetPubKey(ctx)
pubKey, err := states[i].privValidator.GetPubKey(context.Background())
require.NoError(t, err)

activeVals[string(pubKey.Address())] = struct{}{}
@@ -50,9 +50,9 @@ func (cs *State) readReplayMessage(ctx context.Context, msg *TimedWALMessage, ne
cs.logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
if newStepSub != nil {
ctxto, cancel := context.WithTimeout(ctx, 2*time.Second)
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
defer cancel()
stepMsg, err := newStepSub.Next(ctxto)
stepMsg, err := newStepSub.Next(ctx)
if errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("subscription timed out: %w", err)
} else if err != nil {
@@ -240,9 +240,9 @@ func (h *Handshaker) NBlocks() int {
func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) error {

// Handshake is done via ABCI Info on the query conn.
res, err := proxyApp.Query().Info(ctx, proxy.RequestInfo)
res, err := proxyApp.Query().InfoSync(ctx, proxy.RequestInfo)
if err != nil {
return fmt.Errorf("error calling Info: %w", err)
return fmt.Errorf("error calling Info: %v", err)
}

blockHeight := res.LastBlockHeight
@@ -266,7 +266,7 @@ func (h *Handshaker) Handshake(ctx context.Context, proxyApp proxy.AppConns) err
// Replay blocks up to the latest in the blockstore.
_, err = h.ReplayBlocks(ctx, h.initialState, appHash, blockHeight, proxyApp)
if err != nil {
return fmt.Errorf("error on replay: %w", err)
return fmt.Errorf("error on replay: %v", err)
}

h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced",
@@ -316,7 +316,7 @@ func (h *Handshaker) ReplayBlocks(
Validators: nextVals,
AppStateBytes: h.genDoc.AppState,
}
res, err := proxyApp.Consensus().InitChain(ctx, req)
res, err := proxyApp.Consensus().InitChainSync(ctx, req)
if err != nil {
return nil, err
}
@@ -422,17 +422,9 @@ func (h *Handshaker) ReplayBlocks(
if err != nil {
return nil, err
}
mockApp, err := newMockProxyApp(ctx, h.logger, appHash, abciResponses)
if err != nil {
return nil, err
}

mockApp := newMockProxyApp(ctx, h.logger, appHash, abciResponses)
h.logger.Info("Replay last block using mock app")
state, err = h.replayBlock(ctx, state, storeBlockHeight, mockApp)
if err != nil {
return nil, err
}

return state.AppHash, err
}


@@ -15,10 +15,10 @@ import (
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/proxy"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
sm "github.com/tendermint/tendermint/internal/state"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/types"
)

@@ -74,7 +74,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := cs.eventBus.Unsubscribe(ctx, args); err != nil {
cs.logger.Error("error unsubscribing to event bus", "err", err)
cs.logger.Error("Error unsubscribing to event bus", "err", err)
}
}()

@@ -91,7 +91,7 @@ func (cs *State) ReplayFile(ctx context.Context, file string, console bool) erro
var msg *TimedWALMessage
for {
if nextN == 0 && console {
nextN, err = pb.replayConsoleLoop(ctx)
nextN, err = pb.replayConsoleLoop()
if err != nil {
return err
}
@@ -187,7 +187,7 @@ func (cs *State) startForReplay() {

// console function for parsing input and running commands. The integer
// return value is invalid unless the error is nil.
func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) {
func (pb *playback) replayConsoleLoop() (int, error) {
for {
fmt.Printf("> ")
bufReader := bufio.NewReader(os.Stdin)
@@ -225,6 +225,7 @@ func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) {
// NOTE: "back" is not supported in the state machine design,
// so we restart and replay up to

ctx := context.TODO()
// ensure all new step events are regenerated as expected

newStepSub, err := pb.cs.eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
@@ -237,7 +238,7 @@ func (pb *playback) replayConsoleLoop(ctx context.Context) (int, error) {
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil {
pb.cs.logger.Error("error unsubscribing from eventBus", "err", err)
pb.cs.logger.Error("Error unsubscribing from eventBus", "err", err)
}
}()


@@ -61,22 +61,17 @@ func newMockProxyApp(
logger log.Logger,
appHash []byte,
abciResponses *tmstate.ABCIResponses,
) (proxy.AppConnConsensus, error) {

) proxy.AppConnConsensus {
clientCreator := abciclient.NewLocalCreator(&mockProxyApp{
appHash: appHash,
abciResponses: abciResponses,
})
cli, err := clientCreator(logger)
cli, _ := clientCreator(logger)
err := cli.Start(ctx)
if err != nil {
return nil, err
panic(err)
}

if err = cli.Start(ctx); err != nil {
return nil, err
}

return proxy.NewAppConnConsensus(cli, proxy.NopMetrics()), nil
return proxy.NewAppConnConsensus(cli, proxy.NopMetrics())
}

type mockProxyApp struct {
@@ -10,6 +10,7 @@ import (
"os"
"path/filepath"
"runtime"
"sort"
"testing"
"time"

@@ -27,12 +28,12 @@ import (
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/internal/proxy"
"github.com/tendermint/tendermint/internal/pubsub"
sm "github.com/tendermint/tendermint/internal/state"
sf "github.com/tendermint/tendermint/internal/state/test/factory"
"github.com/tendermint/tendermint/internal/store"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/pubsub"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
@@ -60,11 +61,10 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
logger := log.TestingLogger()
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(t, consensusReplayConfig)
privValidator := loadPrivValidator(consensusReplayConfig)
blockStore := store.NewBlockStore(dbm.NewMemDB())
cs := newStateWithConfigAndBlockStore(
ctx,
t,
logger,
consensusReplayConfig,
state,
@@ -73,17 +73,17 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
blockStore,
)

bytes, err := os.ReadFile(cs.config.WalFile())
require.NoError(t, err)
require.NotNil(t, bytes)
bytes, _ := os.ReadFile(cs.config.WalFile())
t.Logf("====== WAL: \n\r%X\n", bytes)

require.NoError(t, cs.Start(ctx))
err = cs.Start(ctx)
require.NoError(t, err)
defer func() {
if err := cs.Stop(); err != nil {
t.Error(err)
}
}()
t.Cleanup(cs.Wait)

// This is just a signal that we haven't halted; it's not something contained
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
@@ -93,9 +93,9 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
Query: types.EventQueryNewBlock,
})
require.NoError(t, err)
ctxto, cancel := context.WithTimeout(ctx, 120*time.Second)
ctx, cancel := context.WithTimeout(ctx, 120*time.Second)
defer cancel()
_, err = newBlockSub.Next(ctxto)
_, err = newBlockSub.Next(ctx)
if errors.Is(err, context.DeadlineExceeded) {
t.Fatal("Timed out waiting for new block (see trace above)")
} else if err != nil {
@@ -103,17 +103,16 @@ func startNewStateAndWaitForBlock(ctx context.Context, t *testing.T, consensusRe
}
}

func sendTxs(ctx context.Context, t *testing.T, cs *State) {
t.Helper()
func sendTxs(ctx context.Context, cs *State) {
for i := 0; i < 256; i++ {
select {
case <-ctx.Done():
return
default:
tx := []byte{byte(i)}

require.NoError(t, assertMempool(t, cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}))

if err := assertMempool(cs.txNotifier).CheckTx(ctx, tx, nil, mempool.TxInfo{}); err != nil {
panic(err)
}
i++
}
}
@@ -134,7 +133,7 @@ func TestWALCrash(t *testing.T) {
1},
{"many non-empty blocks",
func(stateDB dbm.DB, cs *State, ctx context.Context) {
go sendTxs(ctx, t, cs)
go sendTxs(ctx, cs)
},
3},
}
@@ -149,7 +148,7 @@ func TestWALCrash(t *testing.T) {
}
}

func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusReplayConfig *config.Config,
func crashWALandCheckLiveness(ctx context.Context, t *testing.T, consensusReplayConfig *config.Config,
initFn func(dbm.DB, *State, context.Context), heightToStop int64) {
walPanicked := make(chan error)
crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
@@ -157,6 +156,8 @@ func crashWALandCheckLiveness(rctx context.Context, t *testing.T, consensusRepla
i := 1
LOOP:
for {
t.Logf("====== LOOP %d\n", i)

// create consensus state from a clean slate
logger := log.NewNopLogger()
blockDB := dbm.NewMemDB()
@@ -165,10 +166,9 @@ LOOP:
blockStore := store.NewBlockStore(blockDB)
state, err := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
require.NoError(t, err)
privValidator := loadPrivValidator(t, consensusReplayConfig)
privValidator := loadPrivValidator(consensusReplayConfig)
cs := newStateWithConfigAndBlockStore(
rctx,
t,
ctx,
logger,
consensusReplayConfig,
state,
@@ -178,7 +178,7 @@ LOOP:
)

// start sending transactions
ctx, cancel := context.WithCancel(rctx)
ctx, cancel := context.WithCancel(ctx)
initFn(stateDB, cs, ctx)

// clean up WAL file from the previous iteration
@@ -201,9 +201,9 @@ LOOP:
i++

select {
case <-rctx.Done():
t.Fatal("context canceled before test completed")
case err := <-walPanicked:
t.Logf("WAL panicked: %v", err)

// make sure we can make blocks after a crash
startNewStateAndWaitForBlock(ctx, t, consensusReplayConfig, cs.Height, blockDB, stateStore)

@@ -335,7 +335,6 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {

css, genDoc, cfg, cleanup := randConsensusNetWithPeers(
ctx,
t,
cfg,
nVals,
nPeers,
@@ -343,10 +342,7 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
newMockTickerFunc(true),
newPersistentKVStoreWithPath)
sim.Config = cfg

var err error
sim.GenesisState, err = sm.MakeGenesisState(genDoc)
require.NoError(t, err)
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
sim.CleanupFunc = cleanup

partSize := types.BlockPartSizeBytes
@@ -363,15 +359,15 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
// start the machine
startTestRound(ctx, css[0], height, round)
incrementHeight(vss...)
ensureNewRound(t, newRoundCh, height, 0)
ensureNewProposal(t, proposalCh, height, round)
ensureNewRound(newRoundCh, height, 0)
ensureNewProposal(proposalCh, height, round)
rs := css[0].GetRoundState()

signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)

ensureNewRound(t, newRoundCh, height+1, 0)
ensureNewRound(newRoundCh, height+1, 0)

// HEIGHT 2
height++
@@ -381,15 +377,13 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
valPubKey1ABCI, err := encoding.PubKeyToProto(newValidatorPubKey1)
require.NoError(t, err)
newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, _, err := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err := propBlock.MakePartSet(partSize)
require.NoError(t, err)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx1, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}

proposal := types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time)
proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
p := proposal.ToProto()
if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -400,12 +394,12 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(t, proposalCh, height, round)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)
ensureNewRound(t, newRoundCh, height+1, 0)
ensureNewRound(newRoundCh, height+1, 0)

// HEIGHT 3
height++
@@ -415,15 +409,13 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
updatePubKey1ABCI, err := encoding.PubKeyToProto(updateValidatorPubKey1)
require.NoError(t, err)
updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25)
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err = propBlock.MakePartSet(partSize)
require.NoError(t, err)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, updateValidatorTx1, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}

proposal = types.NewProposal(vss[2].Height, round, -1, blockID, propBlock.Header.Time)
proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[2].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -434,12 +426,12 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(t, proposalCh, height, round)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
signAddVotes(ctx, t, css[0], tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
signAddVotes(ctx, sim.Config, css[0], tmproto.PrecommitType,
rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(),
vss[1:nVals]...)
ensureNewRound(t, newRoundCh, height+1, 0)
ensureNewRound(newRoundCh, height+1, 0)

// HEIGHT 4
height++
@@ -449,23 +441,21 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
newVal2ABCI, err := encoding.PubKeyToProto(newValidatorPubKey2)
require.NoError(t, err)
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower)
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{})
assert.NoError(t, err)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)
newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey(ctx)
require.NoError(t, err)
newVal3ABCI, err := encoding.PubKeyToProto(newValidatorPubKey3)
require.NoError(t, err)
newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower)
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{})
assert.NoError(t, err)
propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
require.NoError(t, err)
propBlockParts, err = propBlock.MakePartSet(partSize)
require.NoError(t, err)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, newValidatorTx3, nil, mempool.TxInfo{})
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss := make([]*validatorStub, nVals+1)
copy(newVss, vss[:nVals+1])
newVss = sortVValidatorStubsByPower(ctx, t, newVss)
sort.Sort(ValidatorStubsByPower(newVss))

valIndexFn := func(cssIdx int) int {
for i, vs := range newVss {
@@ -479,14 +469,12 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
return i
}
}
t.Fatalf("validator css[%d] not found in newVss", cssIdx)
return -1
panic(fmt.Sprintf("validator css[%d] not found in newVss", cssIdx))
}

selfIndex := valIndexFn(0)
require.NotEqual(t, -1, selfIndex)

proposal = types.NewProposal(vss[3].Height, round, -1, blockID, propBlock.Header.Time)
proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[3].SignProposal(ctx, cfg.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -497,67 +485,59 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
t.Fatal(err)
}
ensureNewProposal(t, proposalCh, height, round)
ensureNewProposal(proposalCh, height, round)

removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0)
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{})
assert.NoError(t, err)
err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx2, nil, mempool.TxInfo{})
assert.Nil(t, err)

rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(ctx, t, css[0],
tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
newVss[i])
signAddVotes(ctx, sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(t, newRoundCh, height+1, 0)

ensureNewRound(newRoundCh, height+1, 0)

// HEIGHT 5
height++
incrementHeight(vss...)
// Reflect the changes to vss[nVals] at height 3 and resort newVss.
newVssIdx := valIndexFn(nVals)
require.NotEqual(t, -1, newVssIdx)

newVss[newVssIdx].VotingPower = 25
newVss = sortVValidatorStubsByPower(ctx, t, newVss)

sort.Sort(ValidatorStubsByPower(newVss))
selfIndex = valIndexFn(0)
require.NotEqual(t, -1, selfIndex)
ensureNewProposal(t, proposalCh, height, round)
ensureNewProposal(proposalCh, height, round)
rs = css[0].GetRoundState()
for i := 0; i < nVals+1; i++ {
if i == selfIndex {
continue
}
signAddVotes(ctx, t, css[0],
tmproto.PrecommitType, sim.Config.ChainID(),
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
newVss[i])
signAddVotes(ctx, sim.Config, css[0],
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
rs.ProposalBlockParts.Header(), newVss[i])
}
ensureNewRound(t, newRoundCh, height+1, 0)
ensureNewRound(newRoundCh, height+1, 0)

// HEIGHT 6
height++
incrementHeight(vss...)
removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0)
|
||||
err = assertMempool(t, css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{})
|
||||
assert.NoError(t, err)
|
||||
propBlock, _, err = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
|
||||
require.NoError(t, err)
|
||||
propBlockParts, err = propBlock.MakePartSet(partSize)
|
||||
require.NoError(t, err)
|
||||
err = assertMempool(css[0].txNotifier).CheckTx(ctx, removeValidatorTx3, nil, mempool.TxInfo{})
|
||||
assert.Nil(t, err)
|
||||
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
|
||||
propBlockParts = propBlock.MakePartSet(partSize)
|
||||
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
newVss = make([]*validatorStub, nVals+3)
|
||||
copy(newVss, vss[:nVals+3])
|
||||
newVss = sortVValidatorStubsByPower(ctx, t, newVss)
|
||||
sort.Sort(ValidatorStubsByPower(newVss))
|
||||
|
||||
selfIndex = valIndexFn(0)
|
||||
require.NotEqual(t, -1, selfIndex)
|
||||
proposal = types.NewProposal(vss[1].Height, round, -1, blockID, propBlock.Header.Time)
|
||||
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
|
||||
p = proposal.ToProto()
|
||||
if err := vss[1].SignProposal(ctx, cfg.ChainID(), p); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
@@ -568,18 +548,17 @@ func setupSimulator(ctx context.Context, t *testing.T) *simulatorTestSuite {
|
||||
if err := css[0].SetProposalAndBlock(ctx, proposal, propBlock, propBlockParts, "some peer"); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
ensureNewProposal(t, proposalCh, height, round)
|
||||
ensureNewProposal(proposalCh, height, round)
|
||||
rs = css[0].GetRoundState()
|
||||
for i := 0; i < nVals+3; i++ {
|
||||
if i == selfIndex {
|
||||
continue
|
||||
}
|
||||
signAddVotes(ctx, t, css[0],
|
||||
tmproto.PrecommitType, sim.Config.ChainID(),
|
||||
types.BlockID{Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header()},
|
||||
newVss[i])
|
||||
signAddVotes(ctx, sim.Config, css[0],
|
||||
tmproto.PrecommitType, rs.ProposalBlock.Hash(),
|
||||
rs.ProposalBlockParts.Header(), newVss[i])
|
||||
}
|
||||
ensureNewRound(t, newRoundCh, height+1, 0)
|
||||
ensureNewRound(newRoundCh, height+1, 0)
|
||||
|
||||
sim.Chain = make([]*types.Block, 0)
|
||||
sim.Commits = make([]*types.Commit, 0)
|
||||
@@ -681,49 +660,56 @@ func TestMockProxyApp(t *testing.T) {
|
||||
err = proto.Unmarshal(bytes, loadedAbciRes)
|
||||
require.NoError(t, err)
|
||||
|
||||
mock, err := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes)
|
||||
require.NoError(t, err)
|
||||
mock := newMockProxyApp(ctx, logger, []byte("mock_hash"), loadedAbciRes)
|
||||
|
||||
abciRes := new(tmstate.ABCIResponses)
|
||||
abciRes.DeliverTxs = make([]*abci.ResponseDeliverTx, len(loadedAbciRes.DeliverTxs))
|
||||
// Execute transactions and get hash.
|
||||
proxyCb := func(req *abci.Request, res *abci.Response) {
|
||||
if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
|
||||
// TODO: make use of res.Log
|
||||
// TODO: make use of this info
|
||||
// Blocks may include invalid txs.
|
||||
txRes := r.DeliverTx
|
||||
if txRes.Code == abci.CodeTypeOK {
|
||||
validTxs++
|
||||
} else {
|
||||
logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log)
|
||||
invalidTxs++
|
||||
}
|
||||
abciRes.DeliverTxs[txIndex] = txRes
|
||||
txIndex++
|
||||
}
|
||||
}
|
||||
mock.SetResponseCallback(proxyCb)
|
||||
|
||||
someTx := []byte("tx")
|
||||
resp, err := mock.DeliverTx(ctx, abci.RequestDeliverTx{Tx: someTx})
|
||||
// TODO: make use of res.Log
|
||||
// TODO: make use of this info
|
||||
// Blocks may include invalid txs.
|
||||
if resp.Code == abci.CodeTypeOK {
|
||||
validTxs++
|
||||
} else {
|
||||
invalidTxs++
|
||||
}
|
||||
abciRes.DeliverTxs[txIndex] = resp
|
||||
txIndex++
|
||||
|
||||
_, err = mock.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: someTx})
|
||||
assert.NoError(t, err)
|
||||
})
|
||||
assert.True(t, validTxs == 1)
|
||||
assert.True(t, invalidTxs == 0)
|
||||
}
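For orientation: the two DeliverTx styles above differ only in how results reach the counters — the callback flow registers a handler on the proxy app and reads the counters after a flush, while the synchronous call returns the response directly. A minimal sketch of the callback shape, assuming the abci types this test already imports (illustrative, not part of the diff):

import abci "github.com/tendermint/tendermint/abci/types"

// countingCallback tallies valid and invalid DeliverTx responses; read the
// counters only after the client has flushed its queue.
func countingCallback(validTxs, invalidTxs *int) func(*abci.Request, *abci.Response) {
	return func(req *abci.Request, res *abci.Response) {
		if r, ok := res.Value.(*abci.Response_DeliverTx); ok {
			if r.DeliverTx.Code == abci.CodeTypeOK {
				*validTxs++
			} else {
				*invalidTxs++
			}
		}
	}
}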

func tempWALWithData(t *testing.T, data []byte) string {
t.Helper()

func tempWALWithData(data []byte) string {
walFile, err := os.CreateTemp("", "wal")
require.NoError(t, err, "failed to create temp WAL file")

if err != nil {
panic(fmt.Sprintf("failed to create temp WAL file: %v", err))
}
_, err = walFile.Write(data)
require.NoError(t, err, "failed to write to temp WAL file")

require.NoError(t, walFile.Close(), "failed to close temp WAL file")

if err != nil {
panic(fmt.Sprintf("failed to write to temp WAL file: %v", err))
}
if err := walFile.Close(); err != nil {
panic(fmt.Sprintf("failed to close temp WAL file: %v", err))
}
return walFile.Name()
}
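Both versions of tempWALWithData do the same three steps — create a temp file, write the WAL bytes, close — and differ only in failure handling (require against the threaded testing.T versus panics). A self-contained sketch of the require-based shape (tempFileWithData is an illustrative name, not the repo's helper):

import (
	"os"
	"testing"

	"github.com/stretchr/testify/require"
)

// tempFileWithData writes data to a fresh temp file and returns its path,
// failing the test on any I/O error.
func tempFileWithData(t *testing.T, data []byte) string {
	t.Helper()
	f, err := os.CreateTemp("", "wal")
	require.NoError(t, err, "failed to create temp WAL file")
	_, err = f.Write(data)
	require.NoError(t, err, "failed to write to temp WAL file")
	require.NoError(t, f.Close(), "failed to close temp WAL file")
	return f.Name()
}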

// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(
rctx context.Context,
ctx context.Context,
t *testing.T,
sim *simulatorTestSuite,
nBlocks int,
@@ -735,8 +721,9 @@ func testHandshakeReplay(
var store *mockBlockStore
var stateDB dbm.DB
var genesisState sm.State
var cancel context.CancelFunc

ctx, cancel := context.WithCancel(rctx)
ctx, cancel = context.WithCancel(ctx)
t.Cleanup(cancel)

cfg := sim.Config
@@ -752,28 +739,29 @@ func testHandshakeReplay(
cfg = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(t, cfg, genesisState.ConsensusParams)
store = newMockBlockStore(cfg, genesisState.ConsensusParams)
} else { // test single node
testConfig, err := ResetConfig(fmt.Sprintf("%s_%v_s", t.Name(), mode))
require.NoError(t, err)
defer func() { _ = os.RemoveAll(testConfig.RootDir) }()
walBody, err := WALWithNBlocks(ctx, t, logger, numBlocks)
walBody, err := WALWithNBlocks(ctx, t, numBlocks)
require.NoError(t, err)
walFile := tempWALWithData(t, walBody)
walFile := tempWALWithData(walBody)
cfg.Consensus.SetWalFile(walFile)

privVal, err := privval.LoadFilePV(cfg.PrivValidator.KeyFile(), cfg.PrivValidator.StateFile())
require.NoError(t, err)

wal, err := NewWAL(ctx, logger, walFile)
wal, err := NewWAL(logger, walFile)
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
t.Cleanup(func() { cancel(); wal.Wait() })
chain, commits = makeBlockchainFromWAL(t, wal)
chain, commits, err = makeBlockchainFromWAL(wal)
require.NoError(t, err)
pubKey, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
stateDB, genesisState, store = stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion)
stateDB, genesisState, store = stateAndStore(cfg, pubKey, kvstore.ProtocolVersion)

}
stateStore := sm.NewStore(stateDB)
@@ -784,7 +772,6 @@ func testHandshakeReplay(
// run the chain through state.ApplyBlock to build up the tendermint state
state = buildTMStateFromChain(
ctx,
t,
cfg,
logger,
sim.Mempool,
@@ -799,7 +786,7 @@ func testHandshakeReplay(
latestAppHash := state.AppHash

// make a new client creator
kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger,
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_a_r%d", nBlocks, mode, rand.Int())))
t.Cleanup(func() { require.NoError(t, kvstoreApp.Close()) })

@@ -812,7 +799,7 @@ func testHandshakeReplay(
stateStore := sm.NewStore(stateDB1)
err := stateStore.Save(genesisState)
require.NoError(t, err)
buildAppStateFromChain(ctx, t, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
buildAppStateFromChain(ctx, proxyApp, stateStore, sim.Mempool, sim.Evpool, genesisState, chain, nBlocks, mode, store)
}

// Prune block store if requested
@@ -825,23 +812,25 @@ func testHandshakeReplay(
}

// now start the app using the handshake - it should sync
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator2, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")
if err := proxyApp.Start(ctx); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}

t.Cleanup(func() { cancel(); proxyApp.Wait() })

err = handshaker.Handshake(ctx, proxyApp)
err := handshaker.Handshake(ctx, proxyApp)
if expectError {
require.Error(t, err)
return
} else if err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
require.NoError(t, err, "Error on abci handshake")

// get the latest app hash from the app
res, err := proxyApp.Query().Info(ctx, abci.RequestInfo{Version: ""})
res, err := proxyApp.Query().InfoSync(ctx, abci.RequestInfo{Version: ""})
if err != nil {
t.Fatal(err)
}
@@ -868,7 +857,6 @@ func testHandshakeReplay(

func applyBlock(
ctx context.Context,
t *testing.T,
stateStore sm.Store,
mempool mempool.Mempool,
evpool sm.EvidencePool,
@@ -880,17 +868,16 @@ func applyBlock(
testPartSize := types.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)

bps, err := blk.MakePartSet(testPartSize)
require.NoError(t, err)
blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: bps.Header()}
blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
newState, err := blockExec.ApplyBlock(ctx, st, blkID, blk)
require.NoError(t, err)
if err != nil {
panic(err)
}
return newState
}
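applyBlock pushes a single block through the BlockExecutor and returns the new state; its callers fold it over a chain. Condensed shape of that fold, using the testing.T-threaded signature from one side of this diff (a fragment of the surrounding test code, not standalone):

// Replay blocks 0..nBlocks-1, threading the evolving state through each call.
for i := 0; i < nBlocks; i++ {
	state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[i], proxyApp, blockStore)
}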

func buildAppStateFromChain(
ctx context.Context,
t *testing.T,
proxyApp proxy.AppConns,
stateStore sm.Store,
mempool mempool.Mempool,
@@ -901,45 +888,46 @@ func buildAppStateFromChain(
mode uint,
blockStore *mockBlockStore,
) {
t.Helper()
// start a new app without handshake, play nBlocks blocks
require.NoError(t, proxyApp.Start(ctx))
if err := proxyApp.Start(ctx); err != nil {
panic(err)
}

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)

require.NoError(t, stateStore.Save(state)) // save height 1's validatorsInfo

}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
switch mode {
case 0:
for i := 0; i < nBlocks; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}
case 1, 2, 3:
for i := 0; i < nBlocks-1; i++ {
block := chain[i]
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}

if mode == 2 || mode == 3 {
// update the kvstore height and apphash
// as if we ran commit but not
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
state = applyBlock(ctx, stateStore, mempool, evpool, state, chain[nBlocks-1], proxyApp, blockStore)
}
default:
require.Fail(t, "unknown mode %v", mode)
panic(fmt.Sprintf("unknown mode %v", mode))
}

}

func buildTMStateFromChain(
ctx context.Context,
t *testing.T,
cfg *config.Config,
logger log.Logger,
mempool mempool.Mempool,
@@ -951,45 +939,46 @@ func buildTMStateFromChain(
mode uint,
blockStore *mockBlockStore,
) sm.State {
t.Helper()

// run the whole chain against this client to build up the tendermint state
kvstoreApp := kvstore.NewPersistentKVStoreApplication(logger,
kvstoreApp := kvstore.NewPersistentKVStoreApplication(
filepath.Join(cfg.DBDir(), fmt.Sprintf("replay_test_%d_%d_t", nBlocks, mode)))
defer kvstoreApp.Close()
clientCreator := abciclient.NewLocalCreator(kvstoreApp)

proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx))
if err := proxyApp.Start(ctx); err != nil {
panic(err)
}

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
_, err := proxyApp.Consensus().InitChain(ctx, abci.RequestInitChain{
if _, err := proxyApp.Consensus().InitChainSync(ctx, abci.RequestInitChain{
Validators: validators,
})
require.NoError(t, err)

require.NoError(t, stateStore.Save(state))

}); err != nil {
panic(err)
}
if err := stateStore.Save(state); err != nil { // save height 1's validatorsInfo
panic(err)
}
switch mode {
case 0:
// sync right up
for _, block := range chain {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}

case 1, 2, 3:
// sync up to the penultimate as if we stored the block.
// whether we commit or not depends on the appHash
for _, block := range chain[:len(chain)-1] {
state = applyBlock(ctx, t, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
state = applyBlock(ctx, stateStore, mempool, evpool, state, block, proxyApp, blockStore)
}

// apply the final block to a state copy so we can
// get the right next appHash but keep the state back
applyBlock(ctx, t, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
applyBlock(ctx, stateStore, mempool, evpool, state, chain[len(chain)-1], proxyApp, blockStore)
default:
require.Fail(t, "unknown mode %v", mode)
panic(fmt.Sprintf("unknown mode %v", mode))
}

return state
@@ -1012,14 +1001,12 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) {
const appVersion = 0x0
pubKey, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
stateDB, state, store := stateAndStore(t, cfg, pubKey, appVersion)
stateDB, state, store := stateAndStore(cfg, pubKey, appVersion)
stateStore := sm.NewStore(stateDB)
genDoc, err := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
require.NoError(t, err)
genDoc, _ := sm.MakeGenesisDocFromFile(cfg.GenesisFile())
state.LastValidators = state.Validators.Copy()
// mode = 0 for committing all the blocks
blocks := sf.MakeBlocks(ctx, t, 3, &state, privVal)

blocks := sf.MakeBlocks(3, &state, privVal)
store.chain = blocks

logger := log.TestingLogger()
@@ -1090,14 +1077,17 @@ func (app *badApp) Commit() abci.ResponseCommit {
//--------------------------
// utils for making blocks

func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Commit) {
t.Helper()
func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
var height int64

// Search for height marker
gr, found, err := wal.SearchForEndHeight(height, &WALSearchOptions{})
require.NoError(t, err)
require.True(t, found, "wal does not contain height %d", height)
if err != nil {
return nil, nil, err
}
if !found {
return nil, nil, fmt.Errorf("wal does not contain height %d", height)
}
defer gr.Close()

// log.Notice("Build a blockchain by reading from the WAL")
@@ -1114,8 +1104,9 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
msg, err := dec.Decode()
if err == io.EOF {
break
} else if err != nil {
return nil, nil, err
}
require.NoError(t, err)

piece := readPieceFromWAL(msg)
if piece == nil {
@@ -1128,20 +1119,25 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
if thisBlockParts != nil {
var pbb = new(tmproto.Block)
bz, err := io.ReadAll(thisBlockParts.GetReader())
require.NoError(t, err)

require.NoError(t, proto.Unmarshal(bz, pbb))

if err != nil {
panic(err)
}
err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
require.NoError(t, err)

require.Equal(t, block.Height, height+1,
"read bad block from wal. got height %d, expected %d", block.Height, height+1)
if err != nil {
panic(err)
}

if block.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Height
require.Equal(t, commitHeight, height+1,
"commit doesnt match. got height %d, expected %d", commitHeight, height+1)

if commitHeight != height+1 {
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
height++
@@ -1150,7 +1146,9 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
thisBlockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
_, err := thisBlockParts.AddPart(p)
require.NoError(t, err)
if err != nil {
return nil, nil, err
}
case *types.Vote:
if p.Type == tmproto.PrecommitType {
thisBlockCommit = types.NewCommit(p.Height, p.Round,
@@ -1160,21 +1158,28 @@ func makeBlockchainFromWAL(t *testing.T, wal WAL) ([]*types.Block, []*types.Comm
}
// grab the last block too
bz, err := io.ReadAll(thisBlockParts.GetReader())
require.NoError(t, err)

if err != nil {
panic(err)
}
var pbb = new(tmproto.Block)
require.NoError(t, proto.Unmarshal(bz, pbb))

err = proto.Unmarshal(bz, pbb)
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
require.NoError(t, err)

require.Equal(t, block.Height, height+1, "read bad block from wal. got height %d, expected %d", block.Height, height+1)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
}
commitHeight := thisBlockCommit.Height
require.Equal(t, commitHeight, height+1, "commit does not match. got height %d, expected %d", commitHeight, height+1)

if commitHeight != height+1 {
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
commits = append(commits, thisBlockCommit)
return blocks, commits
return blocks, commits, nil
}
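Both variants of makeBlockchainFromWAL share the same scan shape; only error delivery differs (require with a threaded testing.T versus returned errors). The skeleton, condensed from the code above for orientation (not standalone):

for {
	msg, err := dec.Decode()
	if err == io.EOF {
		break // the final block has no end-height marker; it is assembled after the loop
	} else if err != nil {
		return nil, nil, err // or require.NoError(t, err) in the testing.T variant
	}
	piece := readPieceFromWAL(msg)
	if piece == nil {
		continue
	}
	// dispatch on the concrete piece type: an EndHeightMessage closes the
	// current block, a PartSetHeader/Part extends its part set, and
	// precommit Votes accumulate into its commit
}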

func readPieceFromWAL(msg *TimedWALMessage) interface{} {
@@ -1198,19 +1203,17 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} {

// fresh state and mock store
func stateAndStore(
t *testing.T,
cfg *config.Config,
pubKey crypto.PubKey,
appVersion uint64,
) (dbm.DB, sm.State, *mockBlockStore) {
appVersion uint64) (dbm.DB, sm.State, *mockBlockStore) {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
state, err := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
require.NoError(t, err)
state, _ := sm.MakeGenesisStateFromFile(cfg.GenesisFile())
state.Version.Consensus.App = appVersion
store := newMockBlockStore(t, cfg, state.ConsensusParams)
require.NoError(t, stateStore.Save(state))

store := newMockBlockStore(cfg, state.ConsensusParams)
if err := stateStore.Save(state); err != nil {
panic(err)
}
return stateDB, state, store
}
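stateAndStore is the common entry point for these tests: an in-memory state DB seeded from the genesis file, plus a mock block store. Typical call shape (a fragment using the testing.T variant from one side of the diff; values are illustrative):

stateDB, genesisState, store := stateAndStore(t, cfg, pubKey, kvstore.ProtocolVersion)
stateStore := sm.NewStore(stateDB)
// stateStore now serves the saved genesis state; store stands in for the
// block store during the handshake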

@@ -1223,16 +1226,11 @@ type mockBlockStore struct {
chain []*types.Block
commits []*types.Commit
base int64
t *testing.T
}

// TODO: NewBlockStore(db.NewMemDB) ...
func newMockBlockStore(t *testing.T, cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
return &mockBlockStore{
cfg: cfg,
params: params,
t: t,
}
func newMockBlockStore(cfg *config.Config, params types.ConsensusParams) *mockBlockStore {
return &mockBlockStore{cfg, params, nil, nil, 0}
}

func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) }
@@ -1246,10 +1244,8 @@ func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
func (bs *mockBlockStore) LoadBlockMetaByHash(hash []byte) *types.BlockMeta { return nil }
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
bps, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(bs.t, err)
return &types.BlockMeta{
BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()},
BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
Header: block.Header,
}
}
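LoadBlockMeta rebuilds the block's part set on every call; the two sides differ only in whether a MakePartSet failure fails the test (via the stored t) or is ignored by chaining. A standalone sketch of the same construction with the error surfaced to the caller, using the two-value MakePartSet from one side of this diff (blockMeta is an illustrative helper, not the diff's code):

import "github.com/tendermint/tendermint/types"

// blockMeta derives a BlockMeta from a block: the BlockID commits to both
// the block hash and the header of its part set.
func blockMeta(block *types.Block) (*types.BlockMeta, error) {
	bps, err := block.MakePartSet(types.BlockPartSizeBytes)
	if err != nil {
		return nil, err
	}
	return &types.BlockMeta{
		BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()},
		Header:  block.Header,
	}, nil
}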
@@ -1281,9 +1277,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

votePower := 10 + int64(rand.Uint32())
val, _, err := factory.Validator(ctx, votePower)
require.NoError(t, err)
val, _ := factory.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
clientCreator := abciclient.NewLocalCreator(app)
@@ -1296,7 +1290,7 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
require.NoError(t, err)
pubKey, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
stateDB, state, store := stateAndStore(t, cfg, pubKey, 0x0)
stateDB, state, store := stateAndStore(cfg, pubKey, 0x0)
stateStore := sm.NewStore(stateDB)

oldValAddr := state.Validators.Validators[0].Address
@@ -1308,10 +1302,13 @@ func TestHandshakeUpdatesValidators(t *testing.T) {
logger := log.TestingLogger()
handshaker := NewHandshaker(logger, stateStore, state, store, eventbus.NopEventBus{}, genDoc)
proxyApp := proxy.NewAppConns(clientCreator, logger, proxy.NopMetrics())
require.NoError(t, proxyApp.Start(ctx), "Error starting proxy app connections")

require.NoError(t, handshaker.Handshake(ctx, proxyApp), "error on abci handshake")
if err := proxyApp.Start(ctx); err != nil {
t.Fatalf("Error starting proxy app connections: %v", err)
}

if err := handshaker.Handshake(ctx, proxyApp); err != nil {
t.Fatalf("Error on abci handshake: %v", err)
}
// reload the state, check the validator set was updated
state, err = stateStore.Load()
require.NoError(t, err)

@@ -3,13 +3,11 @@ package consensus
import (
"bytes"
"context"
"encoding/json"
"errors"
"fmt"
"io"
"os"
"runtime/debug"
"sort"
"sync"
"time"

@@ -19,8 +17,10 @@ import (
"github.com/tendermint/tendermint/crypto"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/eventbus"
"github.com/tendermint/tendermint/internal/libs/fail"
sm "github.com/tendermint/tendermint/internal/state"
tmevents "github.com/tendermint/tendermint/libs/events"
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
tmos "github.com/tendermint/tendermint/libs/os"
@@ -131,7 +131,7 @@ type State struct {
nSteps int

// some functions can be overwritten for testing
decideProposal func(ctx context.Context, height int64, round int32)
decideProposal func(height int64, round int32)
doPrevote func(ctx context.Context, height int64, round int32)
setProposal func(proposal *types.Proposal) error

@@ -178,7 +178,7 @@ func NewState(
doWALCatchup: true,
wal: nilWAL{},
evpool: evpool,
evsw: tmevents.NewEventSwitch(logger),
evsw: tmevents.NewEventSwitch(),
metrics: NopMetrics(),
onStopCh: make(chan *cstypes.RoundState),
}
@@ -253,14 +253,14 @@ func (cs *State) GetRoundState() *cstypes.RoundState {
func (cs *State) GetRoundStateJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return json.Marshal(cs.RoundState)
return tmjson.Marshal(cs.RoundState)
}

// GetRoundStateSimpleJSON returns a json of RoundStateSimple
func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) {
cs.mtx.RLock()
defer cs.mtx.RUnlock()
return json.Marshal(cs.RoundState.RoundStateSimple())
return tmjson.Marshal(cs.RoundState.RoundStateSimple())
}
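The json-to-tmjson swap here likely matters because RoundState contains interface-typed fields; tendermint's libs/json (an assumption based on its use in this diff — see that package for the authoritative behavior) encodes registered interface implementations with type information that encoding/json would drop. The call shape is a drop-in replacement:

import (
	"sync"

	tmjson "github.com/tendermint/tendermint/libs/json"
)

// marshalUnderRLock sketches the read-lock-then-marshal pattern above for
// any snapshot value (illustrative helper, not from the diff).
func marshalUnderRLock(mtx *sync.RWMutex, v interface{}) ([]byte, error) {
	mtx.RLock()
	defer mtx.RUnlock()
	return tmjson.Marshal(v)
}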

// GetValidators returns a copy of the current validators.
@@ -272,7 +272,7 @@ func (cs *State) GetValidators() (int64, []*types.Validator) {

// SetPrivValidator sets the private validator account for signing votes. It
// immediately requests pubkey and caches it.
func (cs *State) SetPrivValidator(ctx context.Context, priv types.PrivValidator) {
func (cs *State) SetPrivValidator(priv types.PrivValidator) {
cs.mtx.Lock()
defer cs.mtx.Unlock()

@@ -298,7 +298,7 @@ func (cs *State) SetPrivValidator(ctx context.Context, priv types.PrivValidator)
}
}

if err := cs.updatePrivValidatorPubKey(ctx); err != nil {
if err := cs.updatePrivValidatorPubKey(); err != nil {
cs.logger.Error("failed to get private validator pubkey", "err", err)
}
}
@@ -489,7 +489,7 @@ func (cs *State) Wait() {
// OpenWAL opens a file to log all consensus messages and timeouts for
// deterministic accountability.
func (cs *State) OpenWAL(ctx context.Context, walFile string) (WAL, error) {
wal, err := NewWAL(ctx, cs.logger.With("wal", walFile), walFile)
wal, err := NewWAL(cs.logger.With("wal", walFile), walFile)
if err != nil {
cs.logger.Error("failed to open WAL", "file", walFile, "err", err)
return nil, err
@@ -755,7 +755,6 @@ func (cs *State) updateToState(ctx context.Context, state sm.State) {

cs.Validators = validators
cs.Proposal = nil
cs.ProposalReceiveTime = time.Time{}
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
cs.LockedRound = -1
@@ -867,6 +866,14 @@ func (cs *State) receiveRoutine(ctx context.Context, maxSteps int) {
))
}

if _, ok := mi.Msg.(*VoteMessage); ok {
// we actually want to simulate failing during
// the previous WriteSync, but this isn't easy to do.
// Equivalent would be to fail here and manually remove
// some bytes from the end of the wal.
fail.Fail() // XXX
}

// handles proposals, block parts, votes
cs.handleMsg(ctx, mi)

@@ -1096,7 +1103,6 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) {
} else {
logger.Debug("resetting proposal info")
cs.Proposal = nil
cs.ProposalReceiveTime = time.Time{}
cs.ProposalBlock = nil
cs.ProposalBlockParts = nil
}
@@ -1119,10 +1125,9 @@ func (cs *State) enterNewRound(ctx context.Context, height int64, round int32) {
cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round,
cstypes.RoundStepNewRound)
}
return
} else {
cs.enterPropose(ctx, height, round)
}

cs.enterPropose(ctx, height, round)
}

// needProofBlock returns true on the first height (so the genesis app hash is signed right away)
@@ -1155,16 +1160,6 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {
return
}

// If this validator is the proposer of this round, and the previous block time is later than
// our local clock time, wait to propose until our local clock time has passed the block time.
if cs.privValidatorPubKey != nil && cs.isProposer(cs.privValidatorPubKey.Address()) {
proposerWaitTime := proposerWaitTime(tmtime.DefaultSource{}, cs.state.LastBlockTime)
if proposerWaitTime > 0 {
cs.scheduleTimeout(proposerWaitTime, height, round, cstypes.RoundStepNewRound)
return
}
}

logger.Debug("entering propose step", "current", fmt.Sprintf("%v/%v/%v", cs.Height, cs.Round, cs.Step))

defer func() {
@@ -1185,10 +1180,12 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {

// Nothing more to do if we're not a validator
if cs.privValidator == nil {
logger.Debug("propose step; not proposing since node is not a validator")
logger.Debug("node is not a validator")
return
}

logger.Debug("node is a validator")

if cs.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
// miss the opportunity to create a block.
@@ -1196,23 +1193,21 @@ func (cs *State) enterPropose(ctx context.Context, height int64, round int32) {
return
}

addr := cs.privValidatorPubKey.Address()
address := cs.privValidatorPubKey.Address()

// if not a validator, we're done
if !cs.Validators.HasAddress(addr) {
logger.Debug("propose step; not proposing since node is not in the validator set",
"addr", addr,
"vals", cs.Validators)
if !cs.Validators.HasAddress(address) {
logger.Debug("node is not a validator", "addr", address, "vals", cs.Validators)
return
}

if cs.isProposer(addr) {
if cs.isProposer(address) {
logger.Debug(
"propose step; our turn to propose",
"proposer", addr,
"proposer", address,
)

cs.decideProposal(ctx, height, round)
cs.decideProposal(height, round)
} else {
logger.Debug(
"propose step; not our turn to propose",
@@ -1225,7 +1220,7 @@ func (cs *State) isProposer(address []byte) bool {
return bytes.Equal(cs.Validators.GetProposer().Address, address)
}

func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round int32) {
func (cs *State) defaultDecideProposal(height int64, round int32) {
var block *types.Block
var blockParts *types.PartSet

@@ -1235,9 +1230,8 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round
block, blockParts = cs.ValidBlock, cs.ValidBlockParts
} else {
// Create a new proposal block from state/txs from the mempool.
var err error
block, blockParts, err = cs.createProposalBlock()
if block == nil || err != nil {
block, blockParts = cs.createProposalBlock()
if block == nil {
return
}
}
@@ -1250,13 +1244,13 @@ func (cs *State) defaultDecideProposal(ctx context.Context, height int64, round

// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID, block.Header.Time)
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID)
p := proposal.ToProto()

// wait the max amount we would wait for a proposal
ctxto, cancel := context.WithTimeout(ctx, cs.config.TimeoutPropose)
ctx, cancel := context.WithTimeout(context.TODO(), cs.config.TimeoutPropose)
defer cancel()
if err := cs.privValidator.SignProposal(ctxto, cs.state.ChainID, p); err == nil {
if err := cs.privValidator.SignProposal(ctx, cs.state.ChainID, p); err == nil {
proposal.Signature = p.Signature

// send proposal and block parts on internal msg queue
@@ -1296,9 +1290,9 @@ func (cs *State) isProposalComplete() bool {
//
// NOTE: keep it side-effect free for clarity.
// CONTRACT: cs.privValidator is not nil.
func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet, err error) {
func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
if cs.privValidator == nil {
return nil, nil, errors.New("entered createProposalBlock with privValidator being nil")
panic("entered createProposalBlock with privValidator being nil")
}

var commit *types.Commit
@@ -1331,11 +1325,8 @@ func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.Pa

// Enter: `timeoutPropose` after entering Propose.
// Enter: proposal block and POL is ready.
// If we received a valid proposal within this round and we are not locked on a block,
// we will prevote for block.
// Otherwise, if we receive a valid proposal that matches the block we are
// locked on or matches a block that received a POL in a round later than our
// locked round, prevote for the proposal, otherwise vote nil.
// Prevote for LockedBlock if we're locked, or ProposalBlock if valid.
// Otherwise vote nil.
func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)

@@ -1362,47 +1353,19 @@ func (cs *State) enterPrevote(ctx context.Context, height int64, round int32) {
// (so we have more time to try and collect +2/3 prevotes for a single block)
}

func (cs *State) proposalIsTimely() bool {
sp := types.SynchronyParams{
Precision: cs.state.ConsensusParams.Synchrony.Precision,
MessageDelay: cs.state.ConsensusParams.Synchrony.MessageDelay,
}

return cs.Proposal.IsTimely(cs.ProposalReceiveTime, sp, cs.state.InitialHeight)
}
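proposalIsTimely delegates to Proposal.IsTimely with the chain's synchrony parameters. A hedged restatement of the proposer-based-timestamp window it checks (types.Proposal.IsTimely is authoritative): a proposal received at recv is timely iff proposalTime - precision <= recv <= proposalTime + messageDelay + precision.

import "time"

// isTimely sketches that bound check; the inputs come from the proposal,
// the local receive time, and the SynchronyParams above.
func isTimely(proposalTime, recv time.Time, precision, msgDelay time.Duration) bool {
	lower := proposalTime.Add(-precision)
	upper := proposalTime.Add(msgDelay).Add(precision)
	return !recv.Before(lower) && !recv.After(upper)
}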

func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)

// Check that a proposed block was not received within this round (and thus executing this from a timeout).
// If a block is locked, prevote that.
if cs.LockedBlock != nil {
logger.Debug("prevote step; already locked on a block; prevoting locked block")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header())
return
}

// If ProposalBlock is nil, prevote nil.
if cs.ProposalBlock == nil {
logger.Debug("prevote step: ProposalBlock is nil; prevoting nil")
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}

if cs.Proposal == nil {
logger.Debug("prevote step: did not receive proposal; prevoting nil")
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}

if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) {
logger.Debug("prevote step: proposal timestamp not equal; prevoting nil")
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}

if cs.Proposal.POLRound == -1 && cs.LockedRound == -1 && !cs.proposalIsTimely() {
logger.Debug("prevote step: Proposal is not timely; prevoting nil - ",
"proposed",
tmtime.Canonical(cs.Proposal.Timestamp).Format(time.RFC3339Nano),
"received",
tmtime.Canonical(cs.ProposalReceiveTime).Format(time.RFC3339Nano),
"msg_delay",
cs.state.ConsensusParams.Synchrony.MessageDelay,
"precision",
cs.state.ConsensusParams.Synchrony.Precision)
logger.Debug("prevote step: ProposalBlock is nil")
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}
@@ -1411,72 +1374,16 @@ func (cs *State) defaultDoPrevote(ctx context.Context, height int64, round int32
err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock)
if err != nil {
// ProposalBlock is invalid, prevote nil.
logger.Error("prevote step: ProposalBlock is invalid; prevoting nil", "err", err)
logger.Error("prevote step: ProposalBlock is invalid", "err", err)
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
return
}

/*
22: upon <PROPOSAL, h_p, round_p, v, −1> from proposer(h_p, round_p) while step_p = propose do
23: if valid(v) && (lockedRound_p = −1 || lockedValue_p = v) then
24: broadcast <PREVOTE, h_p, round_p, id(v)>

Here, cs.Proposal.POLRound corresponds to the -1 in the above algorithm rule.
This means that the proposer is producing a new proposal that has not previously
seen a 2/3 majority by the network.

If we have already locked on a different value that is different from the proposed value,
we prevote nil since we are locked on a different value. Otherwise, if we're not locked on a block
or the proposal matches our locked block, we prevote the proposal.
*/
if cs.Proposal.POLRound == -1 {
if cs.LockedRound == -1 {
logger.Debug("prevote step: ProposalBlock is valid and there is no locked block; prevoting the proposal")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) {
logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
}

/*
28: upon <PROPOSAL, h_p, round_p, v, v_r> from proposer(h_p, round_p) AND 2f + 1 <PREVOTE, h_p, v_r, id(v)> while
step_p = propose && (v_r ≥ 0 && v_r < round_p) do
29: if valid(v) && (lockedRound_p ≤ v_r || lockedValue_p = v) then
30: broadcast <PREVOTE, h_p, round_p, id(v)>

This rule is a bit confusing but breaks down as follows:

If we see a proposal in the current round for value 'v' that lists its valid round as 'v_r'
AND this validator saw a 2/3 majority of the voting power prevote 'v' in round 'v_r', then we will
issue a prevote for 'v' in this round if 'v' is valid and either matches our locked value OR
'v_r' is a round greater than or equal to our current locked round.

'v_r' can be a round greater than to our current locked round if a 2/3 majority of
the network prevoted a value in round 'v_r' but we did not lock on it, possibly because we
missed the proposal in round 'v_r'.
*/
blockID, ok := cs.Votes.Prevotes(cs.Proposal.POLRound).TwoThirdsMajority()
if ok && cs.ProposalBlock.HashesTo(blockID.Hash) && cs.Proposal.POLRound >= 0 && cs.Proposal.POLRound < cs.Round {
if cs.LockedRound <= cs.Proposal.POLRound {
logger.Debug("prevote step: ProposalBlock is valid and received a 2/3" +
"majority in a round later than the locked round; prevoting the proposal")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
if cs.ProposalBlock.HashesTo(cs.LockedBlock.Hash()) {
logger.Debug("prevote step: ProposalBlock is valid and matches our locked block; prevoting the proposal")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
return
}
}

logger.Debug("prevote step: ProposalBlock is valid but was not our locked block or" +
"did not receive a more recent majority; prevoting nil")
cs.signAddVote(ctx, tmproto.PrevoteType, nil, types.PartSetHeader{})
// Prevote cs.ProposalBlock
// NOTE: the proposal signature is validated when it is received,
// and the proposal block parts are validated as they are received (against the merkle hash in the proposal)
logger.Debug("prevote step: ProposalBlock is valid")
cs.signAddVote(ctx, tmproto.PrevoteType, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header())
}
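The two prevote rules quoted in the comments above (algorithm lines 22-24 and 28-30) compress to a single predicate. A sketch of that logic, not the implementation:

// shouldPrevote restates the rule: prevote the proposal iff it is valid
// and we are either unlocked, it matches our lock, or it carries a POL
// from a round at or after our locked round.
func shouldPrevote(valid bool, lockedRound, polRound int32, matchesLock, sawPOLMajority bool) bool {
	if !valid {
		return false // invalid proposals always get a nil prevote
	}
	if polRound == -1 { // fresh proposal (algorithm line 22)
		return lockedRound == -1 || matchesLock
	}
	// re-proposal carrying a POL round (algorithm line 28)
	return sawPOLMajority && (lockedRound <= polRound || matchesLock)
}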

// Enter: any +2/3 prevotes at next round.
@@ -1514,6 +1421,7 @@ func (cs *State) enterPrevoteWait(ctx context.Context, height int64, round int32
// Enter: `timeoutPrecommit` after any +2/3 precommits.
// Enter: +2/3 precomits for block or nil.
// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round)
// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
// else, precommit nil otherwise.
func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32) {
logger := cs.logger.With("height", height, "round", round)
@@ -1560,50 +1468,47 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32)
panic(fmt.Sprintf("this POLRound should be %v but got %v", round, polRound))
}

// +2/3 prevoted nil. Precommit nil.
if blockID.IsNil() {
logger.Debug("precommit step: +2/3 prevoted for nil; precommitting nil")
// +2/3 prevoted nil. Unlock and precommit nil.
if len(blockID.Hash) == 0 {
if cs.LockedBlock == nil {
logger.Debug("precommit step; +2/3 prevoted for nil")
} else {
logger.Debug("precommit step; +2/3 prevoted for nil; unlocking")
cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil

if err := cs.eventBus.PublishEventUnlock(ctx, cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event unlock", "err", err)
}
}

cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}

// At this point, +2/3 prevoted for a particular block.

// If we never received a proposal for this block, we must precommit nil
if cs.Proposal == nil || cs.ProposalBlock == nil {
logger.Debug("precommit step; did not receive proposal, precommitting nil")
cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}

// If the proposal time does not match the block time, precommit nil.
if !cs.Proposal.Timestamp.Equal(cs.ProposalBlock.Header.Time) {
logger.Debug("precommit step: proposal timestamp not equal; precommitting nil")
cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{})
return
}

// If we're already locked on that block, precommit it, and update the LockedRound
if cs.LockedBlock.HashesTo(blockID.Hash) {
logger.Debug("precommit step: +2/3 prevoted locked block; relocking")
logger.Debug("precommit step; +2/3 prevoted locked block; relocking")
cs.LockedRound = round

if err := cs.eventBus.PublishEventRelock(ctx, cs.RoundStateEvent()); err != nil {
logger.Error("precommit step: failed publishing event relock", "err", err)
logger.Error("failed publishing event relock", "err", err)
}

cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
return
}

// If greater than 2/3 of the voting power on the network prevoted for
// the proposed block, update our locked block to this block and issue a
// precommit vote for it.
// If +2/3 prevoted for proposal block, stage and precommit it
if cs.ProposalBlock.HashesTo(blockID.Hash) {
logger.Debug("precommit step: +2/3 prevoted proposal block; locking", "hash", blockID.Hash)
logger.Debug("precommit step; +2/3 prevoted proposal block; locking", "hash", blockID.Hash)

// Validate the block.
if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil {
panic(fmt.Sprintf("precommit step: +2/3 prevoted for an invalid block %v; relocking", err))
panic(fmt.Sprintf("precommit step; +2/3 prevoted for an invalid block: %v", err))
}

cs.LockedRound = round
@@ -1611,7 +1516,7 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32)
cs.LockedBlockParts = cs.ProposalBlockParts

if err := cs.eventBus.PublishEventLock(ctx, cs.RoundStateEvent()); err != nil {
logger.Error("precommit step: failed publishing event lock", "err", err)
logger.Error("failed publishing event lock", "err", err)
}

cs.signAddVote(ctx, tmproto.PrecommitType, blockID.Hash, blockID.PartSetHeader)
@@ -1619,14 +1524,23 @@ func (cs *State) enterPrecommit(ctx context.Context, height int64, round int32)
}

// There was a polka in this round for a block we don't have.
// Fetch that block, and precommit nil.
logger.Debug("precommit step: +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID)
// Fetch that block, unlock, and precommit nil.
// The +2/3 prevotes for this round is the POL for our unlock.
logger.Debug("precommit step; +2/3 prevotes for a block we do not have; voting nil", "block_id", blockID)

cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil

if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
}

if err := cs.eventBus.PublishEventUnlock(ctx, cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event unlock", "err", err)
}

cs.signAddVote(ctx, tmproto.PrecommitType, nil, types.PartSetHeader{})
}
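enterPrecommit's branches also compress to a small decision (sketch of the logic above, not the implementation; the nil/unlock behavior differs between the two sides of this diff):

// precommitDecision: with a +2/3 prevote majority ("polka") in this round,
// precommit the block only if we can vouch for it.
func precommitDecision(polkaForNil, haveProposal, matchesLock, matchesProposal bool) bool {
	switch {
	case polkaForNil:
		return false // precommit nil (and, on one side, unlock)
	case matchesLock:
		return true // relock on the same block
	case haveProposal && matchesProposal:
		return true // lock on the proposal block
	default:
		return false // polka for a block we do not have: precommit nil
	}
}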
|
||||
|
||||
@@ -1734,7 +1648,7 @@ func (cs *State) tryFinalizeCommit(ctx context.Context, height int64) {
|
||||
}
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
if !ok || blockID.IsNil() {
|
||||
if !ok || len(blockID.Hash) == 0 {
|
||||
logger.Error("failed attempt to finalize commit; there was no +2/3 majority or +2/3 was for nil")
|
||||
return
|
||||
}
|
||||
@@ -1765,8 +1679,6 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
|
||||
return
|
||||
}
|
||||
|
||||
cs.calculatePrevoteMessageDelayMetrics()
|
||||
|
||||
blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
|
||||
block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
|
||||
|
||||
@@ -1792,6 +1704,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
|
||||
)
|
||||
logger.Debug(fmt.Sprintf("%v", block))
|
||||
|
||||
fail.Fail() // XXX
|
||||
|
||||
// Save to blockStore.
|
||||
if cs.blockStore.Height() < block.Height {
|
||||
// NOTE: the seenCommit is local justification to commit this block,
@@ -1804,6 +1718,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
logger.Debug("calling finalizeCommit on already stored block", "height", block.Height)
}

fail.Fail() // XXX

// Write EndHeightMessage{} for this height, implying that the blockstore
// has saved the block.
//
@@ -1825,6 +1741,8 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
))
}

fail.Fail() // XXX

// Create a copy of the state for staging and an event cache for txs.
stateCopy := cs.state.Copy()

@@ -1843,14 +1761,18 @@ func (cs *State) finalizeCommit(ctx context.Context, height int64) {
return
}

fail.Fail() // XXX

// must be called before we update state
cs.RecordMetrics(height, block)

// NewHeightStep!
cs.updateToState(ctx, stateCopy)

fail.Fail() // XXX

// Private validator might have changed its key pair => refetch pubkey.
if err := cs.updatePrivValidatorPubKey(ctx); err != nil {
if err := cs.updatePrivValidatorPubKey(); err != nil {
logger.Error("failed to get private validator pubkey", "err", err)
}

@@ -1892,7 +1814,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
if cs.privValidator != nil {
if cs.privValidatorPubKey == nil {
// Metrics won't be updated, but it's not critical.
cs.logger.Error("recordMetrics", "err", errPubKeyIsNotSet)
cs.logger.Error(fmt.Sprintf("recordMetrics: %v", errPubKeyIsNotSet))
} else {
address = cs.privValidatorPubKey.Address()
}
@@ -1957,11 +1879,9 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
//-----------------------------------------------------------------------------

func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
recvTime := tmtime.Now()

// Already have one
// TODO: possibly catch double proposals
if cs.Proposal != nil || proposal == nil {
if cs.Proposal != nil {
return nil
}

@@ -1986,7 +1906,6 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {

proposal.Signature = p.Signature
cs.Proposal = proposal
cs.ProposalReceiveTime = recvTime
// We don't update cs.ProposalBlockParts if it is already set.
// This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round.
// TODO: We can check if Proposal is for a different block, as this is a sign of misbehavior!
@@ -2066,7 +1985,7 @@ func (cs *State) addProposalBlockPart(
// Update Valid* if we can.
prevotes := cs.Votes.Prevotes(cs.Round)
blockID, hasTwoThirds := prevotes.TwoThirdsMajority()
if hasTwoThirds && !blockID.IsNil() && (cs.ValidRound < cs.Round) {
if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.logger.Debug(
"updating valid block to new proposal block",
@@ -2219,13 +2138,33 @@ func (cs *State) addVote(
prevotes := cs.Votes.Prevotes(vote.Round)
cs.logger.Debug("added vote to prevote", "vote", vote, "prevotes", prevotes.StringShort())

// Check to see if >2/3 of the voting power on the network voted for any non-nil block.
if blockID, ok := prevotes.TwoThirdsMajority(); ok && !blockID.IsNil() {
// Greater than 2/3 of the voting power on the network voted for some
// non-nil block
// If +2/3 prevotes for a block or nil for *any* round:
if blockID, ok := prevotes.TwoThirdsMajority(); ok {
// There was a polka!
// If we're locked but this is a recent polka, unlock.
// If it matches our ProposalBlock, update the ValidBlock

// Unlock if `cs.LockedRound < vote.Round <= cs.Round`
// NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round
if (cs.LockedBlock != nil) &&
(cs.LockedRound < vote.Round) &&
(vote.Round <= cs.Round) &&
!cs.LockedBlock.HashesTo(blockID.Hash) {

cs.logger.Debug("unlocking because of POL", "locked_round", cs.LockedRound, "pol_round", vote.Round)

cs.LockedRound = -1
cs.LockedBlock = nil
cs.LockedBlockParts = nil

if err := cs.eventBus.PublishEventUnlock(ctx, cs.RoundStateEvent()); err != nil {
return added, err
}
}

// Update Valid* if we can.
if cs.ValidRound < vote.Round && vote.Round == cs.Round {
// NOTE: our proposal block may be nil, or may not be the block that received a polka.
if len(blockID.Hash) != 0 && (cs.ValidRound < vote.Round) && (vote.Round == cs.Round) {
if cs.ProposalBlock.HashesTo(blockID.Hash) {
cs.logger.Debug("updating valid block because of POL", "valid_round", cs.ValidRound, "pol_round", vote.Round)
cs.ValidRound = vote.Round
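
The unlock rule in the hunk above reduces to a small predicate over rounds and hashes. A minimal sketch of that check, with illustrative names standing in for the consensus state fields (not the actual State API):

package sketch

import "bytes"

// shouldUnlock mirrors the POL unlock condition shown above: we hold a lock
// from an earlier round, the polka round is not in the future, and the polka
// is for a different block than the one we are locked on.
func shouldUnlock(lockedHash, polkaHash []byte, lockedRound, voteRound, curRound int32) bool {
	return lockedHash != nil &&
		lockedRound < voteRound &&
		voteRound <= curRound &&
		!bytes.Equal(lockedHash, polkaHash)
}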
@@ -2261,7 +2200,7 @@ func (cs *State) addVote(

case cs.Round == vote.Round && cstypes.RoundStepPrevote <= cs.Step: // current round
blockID, ok := prevotes.TwoThirdsMajority()
if ok && (cs.isProposalComplete() || blockID.IsNil()) {
if ok && (cs.isProposalComplete() || len(blockID.Hash) == 0) {
cs.enterPrecommit(ctx, height, vote.Round)
} else if prevotes.HasTwoThirdsAny() {
cs.enterPrevoteWait(ctx, height, vote.Round)
@@ -2289,7 +2228,7 @@ func (cs *State) addVote(
cs.enterNewRound(ctx, height, vote.Round)
cs.enterPrecommit(ctx, height, vote.Round)

if !blockID.IsNil() {
if len(blockID.Hash) != 0 {
cs.enterCommit(ctx, height, vote.Round)
if cs.config.SkipTimeoutCommit && precommits.HasAll() {
cs.enterNewRound(ctx, cs.Height, 0)
@@ -2311,7 +2250,6 @@ func (cs *State) addVote(

// CONTRACT: cs.privValidator is not nil.
func (cs *State) signVote(
ctx context.Context,
msgType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
@@ -2334,7 +2272,7 @@ func (cs *State) signVote(
ValidatorIndex: valIdx,
Height: cs.Height,
Round: cs.Round,
Timestamp: tmtime.Now(),
Timestamp: cs.voteTime(),
Type: msgType,
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
}
@@ -2354,16 +2292,40 @@ func (cs *State) signVote(
timeout = time.Second
}

ctxto, cancel := context.WithTimeout(ctx, timeout)
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()

err := cs.privValidator.SignVote(ctxto, cs.state.ChainID, v)
err := cs.privValidator.SignVote(ctx, cs.state.ChainID, v)
vote.Signature = v.Signature
vote.Timestamp = v.Timestamp

return vote, err
}

// voteTime ensures monotonicity of the time a validator votes on.
// It ensures that for a prior block with a BFT-timestamp of T,
// any vote from this validator will have a time of at least T + 1ms.
// This is needed, as monotonicity of time is a guarantee that BFT time provides.
func (cs *State) voteTime() time.Time {
now := tmtime.Now()
minVoteTime := now
// Minimum time increment between blocks
const timeIota = time.Millisecond
// TODO: We should remove the next line if we do not vote for v when cs.ProposalBlock == nil,
// even if cs.LockedBlock != nil. See https://docs.tendermint.com/master/spec/.
if cs.LockedBlock != nil {
// See the BFT time spec https://docs.tendermint.com/master/spec/consensus/bft-time.html
minVoteTime = cs.LockedBlock.Time.Add(timeIota)
} else if cs.ProposalBlock != nil {
minVoteTime = cs.ProposalBlock.Time.Add(timeIota)
}

if now.After(minVoteTime) {
return now
}
return minVoteTime
}
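
The clamping in voteTime is easy to check in isolation: even when the local clock lags the reference block time, the returned timestamp never precedes blockTime + 1ms. A standalone sketch of the same logic (not the State method itself):

package main

import (
	"fmt"
	"time"
)

// monotonicVoteTime reproduces voteTime's clamping against a single
// reference block time; timeIota matches the 1ms increment above.
func monotonicVoteTime(now, blockTime time.Time) time.Time {
	const timeIota = time.Millisecond
	minVoteTime := blockTime.Add(timeIota)
	if now.After(minVoteTime) {
		return now
	}
	return minVoteTime
}

func main() {
	blockTime := time.Now()
	lagging := blockTime.Add(-2 * time.Second) // local clock behind BFT time
	fmt.Println(monotonicVoteTime(lagging, blockTime).After(blockTime)) // true
}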

// sign the vote and publish on internalMsgQueue
func (cs *State) signAddVote(ctx context.Context, msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
if cs.privValidator == nil { // the node does not have a key
@@ -2372,7 +2334,7 @@ func (cs *State) signAddVote(ctx context.Context, msgType tmproto.SignedMsgType,

if cs.privValidatorPubKey == nil {
// Vote won't be signed, but it's not critical.
cs.logger.Error("signAddVote", "err", errPubKeyIsNotSet)
cs.logger.Error(fmt.Sprintf("signAddVote: %v", errPubKeyIsNotSet))
return nil
}

@@ -2382,7 +2344,7 @@ func (cs *State) signAddVote(ctx context.Context, msgType tmproto.SignedMsgType,
}

// TODO: pass pubKey to signVote
vote, err := cs.signVote(ctx, msgType, hash, header)
vote, err := cs.signVote(msgType, hash, header)
if err == nil {
cs.sendInternalMessage(ctx, msgInfo{&VoteMessage{vote}, ""})
cs.logger.Debug("signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote)
@@ -2396,7 +2358,7 @@ func (cs *State) signAddVote(ctx context.Context, msgType tmproto.SignedMsgType,
// updatePrivValidatorPubKey gets the private validator public key and
// memoizes it. This func returns an error if the private validator is not
// responding or responds with an error.
func (cs *State) updatePrivValidatorPubKey(rctx context.Context) error {
func (cs *State) updatePrivValidatorPubKey() error {
if cs.privValidator == nil {
return nil
}
@@ -2415,9 +2377,9 @@ func (cs *State) updatePrivValidatorPubKey(rctx context.Context) error {

// set the context timeout depending on the configuration and the State step;
// this helps avoid blocking the remote signer connection.
ctxto, cancel := context.WithTimeout(rctx, timeout)
ctx, cancel := context.WithTimeout(context.TODO(), timeout)
defer cancel()
pubKey, err := cs.privValidator.GetPubKey(ctxto)
pubKey, err := cs.privValidator.GetPubKey(ctx)
if err != nil {
return err
}
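
Both sides of this hunk bound the remote-signer call with a deadline; they differ only in whether the caller's context is the parent. A hedged sketch of the pattern with a stand-in signer interface (the real privValidator interface is larger and returns a crypto.PubKey):

package sketch

import (
	"context"
	"time"
)

// signer is a stand-in for the privValidator dependency.
type signer interface {
	GetPubKey(ctx context.Context) ([]byte, error)
}

// fetchPubKey bounds the call so a hung signer connection cannot block
// consensus. Deriving from parent, as the ctx-threaded side does, also
// propagates caller cancellation; context.TODO() (the other side) does not.
func fetchPubKey(parent context.Context, s signer, timeout time.Duration) ([]byte, error) {
	ctx, cancel := context.WithTimeout(parent, timeout)
	defer cancel()
	return s.GetPubKey(ctx)
}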
@@ -2450,31 +2412,6 @@ func (cs *State) checkDoubleSigningRisk(height int64) error {
return nil
}

func (cs *State) calculatePrevoteMessageDelayMetrics() {
if cs.Proposal == nil {
return
}
ps := cs.Votes.Prevotes(cs.Round)
pl := ps.List()

sort.Slice(pl, func(i, j int) bool {
return pl[i].Timestamp.Before(pl[j].Timestamp)
})

var votingPowerSeen int64
for _, v := range pl {
_, val := cs.Validators.GetByAddress(v.ValidatorAddress)
votingPowerSeen += val.VotingPower
if votingPowerSeen >= cs.Validators.TotalVotingPower()*2/3+1 {
cs.metrics.QuorumPrevoteMessageDelay.Set(v.Timestamp.Sub(cs.Proposal.Timestamp).Seconds())
break
}
}
if ps.HasAll() {
cs.metrics.FullPrevoteMessageDelay.Set(pl[len(pl)-1].Timestamp.Sub(cs.Proposal.Timestamp).Seconds())
}
}
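
The function deleted here walks prevotes in timestamp order and records the delay at which cumulative voting power first crosses two thirds. The accumulation can be sketched independently of the consensus types:

package sketch

import (
	"sort"
	"time"
)

type prevote struct {
	Timestamp   time.Time
	VotingPower int64
}

// quorumDelay returns the delay, relative to proposalTime, at which
// accumulated voting power first reaches 2/3 of the total plus one, mirroring
// the loop in calculatePrevoteMessageDelayMetrics; ok is false without quorum.
func quorumDelay(votes []prevote, totalPower int64, proposalTime time.Time) (d time.Duration, ok bool) {
	sort.Slice(votes, func(i, j int) bool {
		return votes[i].Timestamp.Before(votes[j].Timestamp)
	})
	var seen int64
	for _, v := range votes {
		seen += v.VotingPower
		if seen >= totalPower*2/3+1 {
			return v.Timestamp.Sub(proposalTime), true
		}
	}
	return 0, false
}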

//---------------------------------------------------------

func CompareHRS(h1 int64, r1 int32, s1 cstypes.RoundStepType, h2 int64, r2 int32, s2 cstypes.RoundStepType) int {
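
Only the signature of CompareHRS survives on this page; a plausible reconstruction is a lexicographic comparison over (height, round, step), returning the usual -1/0/+1. This is a guess from the name and signature, with uint8 standing in for cstypes.RoundStepType:

package sketch

// compareHRS orders two (height, round, step) triples lexicographically;
// a hedged reconstruction, not the body hidden by this diff view.
func compareHRS(h1 int64, r1 int32, s1 uint8, h2 int64, r2 int32, s2 uint8) int {
	switch {
	case h1 < h2, h1 == h2 && r1 < r2, h1 == h2 && r1 == r2 && s1 < s2:
		return -1
	case h1 == h2 && r1 == r2 && s1 == s2:
		return 0
	default:
		return 1
	}
}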
@@ -2531,17 +2468,3 @@ func repairWalFile(src, dst string) error {

return nil
}

// proposerWaitTime determines how long the proposer should wait to propose its next block.
// If the result is zero, a block can be proposed immediately.
//
// Block times must be monotonically increasing, so if the block time of the previous
// block is larger than the proposer's current time, then the proposer will sleep
// until its local clock exceeds the previous block time.
func proposerWaitTime(lt tmtime.Source, bt time.Time) time.Duration {
t := lt.Now()
if bt.After(t) {
return bt.Sub(t)
}
return 0
}
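
proposerWaitTime is pure, so the natural caller just sleeps on its result before proposing. A small usage sketch, assuming only that tmtime.Source exposes Now():

package sketch

import "time"

// clock is a stand-in for tmtime.Source.
type clock interface{ Now() time.Time }

// proposerWaitTime mirrors the function above.
func proposerWaitTime(lt clock, bt time.Time) time.Duration {
	if t := lt.Now(); bt.After(t) {
		return bt.Sub(t)
	}
	return 0
}

// waitToPropose blocks until the local clock has passed the previous block
// time, preserving monotonically increasing block timestamps.
func waitToPropose(lt clock, prevBlockTime time.Time) {
	if d := proposerWaitTime(lt, prevBlockTime); d > 0 {
		time.Sleep(d)
	}
}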

File diff suppressed because it is too large
@@ -81,6 +81,7 @@ func (t *timeoutTicker) stopTimer() {
select {
case <-t.timer.C:
default:
t.logger.Debug("Timer already stopped")
}
}
}
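
The select/default in stopTimer is the standard Go idiom for stopping a time.Timer whose channel may already hold a tick. A self-contained version of the same dance:

package sketch

import "time"

// stopAndDrain stops t and, if a tick already fired, drains the channel so
// a later Reset cannot deliver a stale timeout; the default branch keeps the
// drain from blocking when the channel is empty.
func stopAndDrain(t *time.Timer) {
	if !t.Stop() {
		select {
		case <-t.C:
		default:
		}
	}
}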
@@ -89,6 +90,7 @@ func (t *timeoutTicker) stopTimer() {
// timers are interrupted and replaced by new ticks from later steps
// timeouts of 0 on the tickChan will be immediately relayed to the tockChan
func (t *timeoutTicker) timeoutRoutine(ctx context.Context) {
t.logger.Debug("Starting timeout routine")
var ti timeoutInfo
for {
select {

@@ -1,12 +1,12 @@
package types

import (
"encoding/json"
"errors"
"fmt"
"strings"
"sync"

tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
@@ -237,7 +237,7 @@ func (hvs *HeightVoteSet) StringIndented(indent string) string {
func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return json.Marshal(hvs.toAllRoundVotes())
return tmjson.Marshal(hvs.toAllRoundVotes())
}

func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes {

@@ -2,11 +2,10 @@ package types

import (
"context"
"log"
"fmt"
"os"
"testing"

"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto/tmhash"
"github.com/tendermint/tendermint/internal/test/factory"
@@ -22,7 +21,7 @@ func TestMain(m *testing.M) {
var err error
cfg, err = config.ResetTestRoot("consensus_height_vote_set_test")
if err != nil {
log.Fatal(err)
panic(err)
}
code := m.Run()
os.RemoveAll(cfg.RootDir)
@@ -33,7 +32,7 @@ func TestPeerCatchupRounds(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

valSet, privVals := factory.ValidatorSet(ctx, t, 10, 1)
valSet, privVals := factory.RandValidatorSet(10, 1)

hvs := NewHeightVoteSet(cfg.ChainID(), 1, valSet)

@@ -72,11 +71,11 @@ func makeVoteHR(
valIndex, round int32,
privVals []types.PrivValidator,
) *types.Vote {
t.Helper()

privVal := privVals[valIndex]
pubKey, err := privVal.GetPubKey(ctx)
require.NoError(t, err)
if err != nil {
panic(err)
}

randBytes := tmrand.Bytes(tmhash.Size)

@@ -93,7 +92,9 @@ func makeVoteHR(

v := vote.ToProto()
err = privVal.SignVote(ctx, chainID, v)
require.NoError(t, err, "Error signing vote")
if err != nil {
panic(fmt.Sprintf("Error signing vote: %v", err))
}

vote.Signature = v.Signature

@@ -13,9 +13,9 @@ import (
// PeerRoundState contains the known state of a peer.
// NOTE: Read-only when returned by PeerState.GetRoundState().
type PeerRoundState struct {
Height int64 `json:"height,string"` // Height peer is at
Round int32 `json:"round"` // Round peer is at, -1 if unknown.
Step RoundStepType `json:"step"` // Step peer is at
Height int64 `json:"height"` // Height peer is at
Round int32 `json:"round"` // Round peer is at, -1 if unknown.
Step RoundStepType `json:"step"` // Step peer is at

// Estimated start of round 0 at this height
StartTime time.Time `json:"start_time"`

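The only substantive change in that hunk is the `,string` option on Height. With it, encoding/json emits the int64 as a quoted string, which protects clients (notably JavaScript) that cannot represent full 64-bit integers; without it, a bare number is emitted:

package main

import (
	"encoding/json"
	"fmt"
)

type withString struct {
	Height int64 `json:"height,string"`
}

type plain struct {
	Height int64 `json:"height"`
}

func main() {
	a, _ := json.Marshal(withString{Height: 42})
	b, _ := json.Marshal(plain{Height: 42})
	fmt.Println(string(a)) // {"height":"42"}
	fmt.Println(string(b)) // {"height":42}
}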
@@ -71,15 +71,14 @@ type RoundState struct {
StartTime time.Time `json:"start_time"`

// Subjective time when +2/3 precommits for Block at Round were found
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalReceiveTime time.Time `json:"proposal_receive_time"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`

// Last known round with POL for non-nil valid block.
ValidRound int32 `json:"valid_round"`

@@ -90,13 +90,13 @@ var _ WAL = &BaseWAL{}

// NewWAL returns a new write-ahead logger based on `baseWAL`, which implements
// WAL. It's flushed and synced to disk every 2s and once when stopped.
func NewWAL(ctx context.Context, logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) {
func NewWAL(logger log.Logger, walFile string, groupOptions ...func(*auto.Group)) (*BaseWAL, error) {
err := tmos.EnsureDir(filepath.Dir(walFile), 0700)
if err != nil {
return nil, fmt.Errorf("failed to ensure WAL directory is in place: %w", err)
}

group, err := auto.OpenGroup(ctx, logger, walFile, groupOptions...)
group, err := auto.OpenGroup(logger, walFile, groupOptions...)
if err != nil {
return nil, err
}
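
Composing the calls that appear across the test hunks below, a helper for the context-threaded variant might look like this inside the consensus package, where BaseWAL lives; it is a sketch assembled from this diff, not code from the repository:

// openTestWAL creates, starts, and schedules cleanup for a test WAL using
// the ctx-threaded NewWAL signature and the Start/Stop/Wait lifecycle shown
// in the test hunks of this diff.
func openTestWAL(ctx context.Context, t *testing.T, walFile string) *BaseWAL {
	t.Helper()
	wal, err := NewWAL(ctx, log.TestingLogger(), walFile,
		autofile.GroupCheckDuration(time.Millisecond))
	require.NoError(t, err)
	require.NoError(t, wal.Start(ctx))
	t.Cleanup(func() {
		require.NoError(t, wal.Stop())
		wal.Wait()
	})
	return wal
}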
@@ -194,7 +194,7 @@ func (wal *BaseWAL) Write(msg WALMessage) error {
}

if err := wal.enc.Encode(&TimedWALMessage{tmtime.Now(), msg}); err != nil {
wal.logger.Error("error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
wal.logger.Error("Error writing msg to consensus wal. WARNING: recover may not be possible for the current height",
"err", err, "msg", msg)
return err
}
@@ -377,14 +377,14 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
return nil, err
}
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %w", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to read checksum: %v", err)}
}
crc := binary.BigEndian.Uint32(b)

b = make([]byte, 4)
_, err = dec.rd.Read(b)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %w", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to read length: %v", err)}
}
length := binary.BigEndian.Uint32(b)

@@ -410,7 +410,7 @@ func (dec *WALDecoder) Decode() (*TimedWALMessage, error) {
var res = new(tmcons.TimedWALMessage)
err = proto.Unmarshal(data, res)
if err != nil {
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %w", err)}
return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)}
}

walMsg, err := WALFromProto(res.Msg)

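Decode implies the on-disk record layout: a 4-byte big-endian checksum, a 4-byte big-endian length, then the payload. A hedged standalone reader for that framing; the CRC-32 polynomial (Castagnoli here) is an assumption, since the diff only shows a uint32 being read:

package sketch

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
	"io"
)

// readRecord decodes one WAL record using the framing visible in
// WALDecoder.Decode: checksum, length, then length bytes of protobuf data.
func readRecord(r io.Reader) ([]byte, error) {
	var hdr [8]byte
	if _, err := io.ReadFull(r, hdr[:]); err != nil {
		return nil, fmt.Errorf("failed to read header: %w", err)
	}
	crc := binary.BigEndian.Uint32(hdr[:4])
	length := binary.BigEndian.Uint32(hdr[4:])
	data := make([]byte, length)
	if _, err := io.ReadFull(r, data); err != nil {
		return nil, fmt.Errorf("failed to read data: %w", err)
	}
	if crc32.Checksum(data, crc32.MakeTable(crc32.Castagnoli)) != crc {
		return nil, fmt.Errorf("checksum mismatch")
	}
	return data, nil
}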
@@ -31,12 +31,13 @@ import (
// persistent kvstore application and special consensus wal instance
// (byteBufferWAL) and waits until numBlocks are created.
// If the node fails to produce the given numBlocks, it returns an error.
func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr io.Writer, numBlocks int) (err error) {
func WALGenerateNBlocks(ctx context.Context, t *testing.T, wr io.Writer, numBlocks int) (err error) {
cfg := getConfig(t)

app := kvstore.NewPersistentKVStoreApplication(logger, filepath.Join(cfg.DBDir(), "wal_generator"))
app := kvstore.NewPersistentKVStoreApplication(filepath.Join(cfg.DBDir(), "wal_generator"))
t.Cleanup(func() { require.NoError(t, app.Close()) })

logger := log.TestingLogger().With("wal_generator", "wal_generator")
logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks)

// COPY PASTE FROM node.go WITH A FEW MODIFICATIONS
@@ -82,7 +83,7 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
consensusState := NewState(ctx, logger, cfg.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool)
consensusState.SetEventBus(eventBus)
if privValidator != nil && privValidator != (*privval.FilePV)(nil) {
consensusState.SetPrivValidator(ctx, privValidator)
consensusState.SetPrivValidator(privValidator)
}
// END OF COPY PASTE

@@ -115,11 +116,11 @@ func WALGenerateNBlocks(ctx context.Context, t *testing.T, logger log.Logger, wr
}

// WALWithNBlocks returns a WAL content with numBlocks.
func WALWithNBlocks(ctx context.Context, t *testing.T, logger log.Logger, numBlocks int) (data []byte, err error) {
func WALWithNBlocks(ctx context.Context, t *testing.T, numBlocks int) (data []byte, err error) {
var b bytes.Buffer
wr := bufio.NewWriter(&b)

if err := WALGenerateNBlocks(ctx, t, logger, wr, numBlocks); err != nil {
if err := WALGenerateNBlocks(ctx, t, wr, numBlocks); err != nil {
return []byte{}, err
}

@@ -3,13 +3,11 @@ package consensus
import (
"bytes"
"context"
"errors"
"path/filepath"

"testing"
"time"

"github.com/fortytw2/leaktest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

@@ -17,7 +15,6 @@ import (
"github.com/tendermint/tendermint/internal/consensus/types"
"github.com/tendermint/tendermint/internal/libs/autofile"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmtime "github.com/tendermint/tendermint/libs/time"
tmtypes "github.com/tendermint/tendermint/types"
)
@@ -36,7 +33,7 @@ func TestWALTruncate(t *testing.T) {
// defaultHeadSizeLimit(10M) is hard to simulate.
// this magic number 1 * time.Millisecond makes RotateFile check frequently.
// defaultGroupCheckDuration(5s) is hard to simulate.
wal, err := NewWAL(ctx, logger, walFile,
wal, err := NewWAL(logger, walFile,
autofile.GroupHeadSizeLimit(4096),
autofile.GroupCheckDuration(1*time.Millisecond),
)
@@ -48,13 +45,9 @@ func TestWALTruncate(t *testing.T) {
// 60 blocks are nearly 70K in size, greater than the group's headBuf size (4096 * 10);
// when headBuf is full, truncated content is flushed to the file. At that
// point RotateFile is called, so truncated content exists in each file.
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 60)
err = WALGenerateNBlocks(ctx, t, wal.Group(), 60)
require.NoError(t, err)

// put the leakcheck here so it runs after other cleanup
// functions.
t.Cleanup(leaktest.CheckTimeout(t, 500*time.Millisecond))

time.Sleep(1 * time.Millisecond) // wait groupCheckDuration to make sure RotateFile runs

if err := wal.FlushAndSync(); err != nil {
@@ -110,7 +103,7 @@ func TestWALWrite(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

wal, err := NewWAL(ctx, log.TestingLogger(), walFile)
wal, err := NewWAL(log.TestingLogger(), walFile)
require.NoError(t, err)
err = wal.Start(ctx)
require.NoError(t, err)
@@ -143,15 +136,13 @@ func TestWALSearchForEndHeight(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

logger := log.NewNopLogger()

walBody, err := WALWithNBlocks(ctx, t, logger, 6)
walBody, err := WALWithNBlocks(ctx, t, 6)
if err != nil {
t.Fatal(err)
}
walFile := tempWALWithData(t, walBody)
walFile := tempWALWithData(walBody)

wal, err := NewWAL(ctx, logger, walFile)
wal, err := NewWAL(log.TestingLogger(), walFile)
require.NoError(t, err)

h := int64(3)
@@ -170,20 +161,19 @@ func TestWALSearchForEndHeight(t *testing.T) {
}

func TestWALPeriodicSync(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()

walDir := t.TempDir()
walFile := filepath.Join(walDir, "wal")
wal, err := NewWAL(ctx, log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond))
wal, err := NewWAL(log.TestingLogger(), walFile, autofile.GroupCheckDuration(1*time.Millisecond))

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

require.NoError(t, err)

wal.SetFlushInterval(walTestFlushInterval)
logger := log.NewNopLogger()

// Generate some data
err = WALGenerateNBlocks(ctx, t, logger, wal.Group(), 5)
err = WALGenerateNBlocks(ctx, t, wal.Group(), 5)
require.NoError(t, err)

// We should have data in the buffer now
@@ -192,9 +182,7 @@ func TestWALPeriodicSync(t *testing.T) {
require.NoError(t, wal.Start(ctx))
t.Cleanup(func() {
if err := wal.Stop(); err != nil {
if !errors.Is(err, service.ErrAlreadyStopped) {
t.Error(err)
}
t.Error(err)
}
wal.Wait()
})

@@ -6,8 +6,8 @@ import (
"strings"

abci "github.com/tendermint/tendermint/abci/types"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
)
@@ -185,6 +185,10 @@ func (b *EventBus) PublishEventPolka(ctx context.Context, data types.EventDataRo
return b.Publish(ctx, types.EventPolkaValue, data)
}

func (b *EventBus) PublishEventUnlock(ctx context.Context, data types.EventDataRoundState) error {
return b.Publish(ctx, types.EventUnlockValue, data)
}

func (b *EventBus) PublishEventRelock(ctx context.Context, data types.EventDataRoundState) error {
return b.Publish(ctx, types.EventRelockValue, data)
}
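
PublishEventUnlock only re-adds a publisher; consumers reach it through SubscribeWithArgs, as in the test hunk further down. A hedged subscriber sketch; the SubscribeArgs field names and the Next method are assumptions, since this page truncates the struct literal:

func watchUnlock(ctx context.Context, t *testing.T, bus *eventbus.EventBus) {
	t.Helper()
	sub, err := bus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
		ClientID: "test-client",           // assumed field name
		Query:    types.EventQueryUnlock,  // query shown in this diff
	})
	require.NoError(t, err)

	msg, err := sub.Next(ctx) // assumed accessor on the subscription
	require.NoError(t, err)
	_, ok := msg.Data().(types.EventDataRoundState)
	require.True(t, ok)
}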
@@ -12,9 +12,9 @@ import (

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/eventbus"
tmpubsub "github.com/tendermint/tendermint/internal/pubsub"
tmquery "github.com/tendermint/tendermint/internal/pubsub/query"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
"github.com/tendermint/tendermint/types"
)

@@ -80,9 +80,7 @@ func TestEventBusPublishEventNewBlock(t *testing.T) {
require.NoError(t, err)

block := types.MakeBlock(0, []types.Tx{}, nil, []types.Evidence{})
bps, err := block.MakePartSet(types.BlockPartSizeBytes)
require.NoError(t, err)
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: bps.Header()}
blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}
resultBeginBlock := abci.ResponseBeginBlock{
Events: []abci.Event{
{Type: "testType", Attributes: []abci.EventAttribute{{Key: "baz", Value: "1"}}},
@@ -309,8 +307,7 @@ func TestEventBusPublishEventNewEvidence(t *testing.T) {
err := eventBus.Start(ctx)
require.NoError(t, err)

ev, err := types.NewMockDuplicateVoteEvidence(ctx, 1, time.Now(), "test-chain-id")
require.NoError(t, err)
ev := types.NewMockDuplicateVoteEvidence(1, time.Now(), "test-chain-id")

const query = `tm.event='NewEvidence'`
evSub, err := eventBus.SubscribeWithArgs(ctx, tmpubsub.SubscribeArgs{
@@ -385,6 +382,7 @@ func TestEventBusPublish(t *testing.T) {
require.NoError(t, eventBus.PublishEventNewRound(ctx, types.EventDataNewRound{}))
require.NoError(t, eventBus.PublishEventCompleteProposal(ctx, types.EventDataCompleteProposal{}))
require.NoError(t, eventBus.PublishEventPolka(ctx, types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventUnlock(ctx, types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventRelock(ctx, types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventLock(ctx, types.EventDataRoundState{}))
require.NoError(t, eventBus.PublishEventValidatorSetUpdates(ctx, types.EventDataValidatorSetUpdates{}))
@@ -486,6 +484,7 @@ var events = []string{
types.EventTimeoutProposeValue,
types.EventCompleteProposalValue,
types.EventPolkaValue,
types.EventUnlockValue,
types.EventLockValue,
types.EventRelockValue,
types.EventTimeoutWaitValue,
@@ -506,6 +505,7 @@ var queries = []tmpubsub.Query{
types.EventQueryTimeoutPropose,
types.EventQueryCompleteProposal,
types.EventQueryPolka,
types.EventQueryUnlock,
types.EventQueryLock,
types.EventQueryRelock,
types.EventQueryTimeoutWait,

@@ -382,7 +382,7 @@ func (evpool *Pool) listEvidence(prefixKey int64, maxBytes int64) ([]types.Evide

iter, err := dbm.IteratePrefix(evpool.evidenceStore, prefixToBytes(prefixKey))
if err != nil {
return nil, totalSize, fmt.Errorf("database error: %w", err)
return nil, totalSize, fmt.Errorf("database error: %v", err)
}

defer iter.Close()

Some files were not shown because too many files have changed in this diff