Mirror of https://github.com/tendermint/tendermint.git (synced 2026-01-15 17:22:50 +00:00)

Compare commits: tmp...bez/nuke-l

14 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 70c8709ab7 | |
| | ab3a3d4dc8 | |
| | 82ac5569a0 | |
| | a55904e036 | |
| | af1ef8250b | |
| | 984b3c3984 | |
| | 68c54f0676 | |
| | dcf91478f8 | |
| | 4d0da50e41 | |
| | e100fde0eb | |
| | 09a47a1bbf | |
| | beafe7d4f1 | |
| | 508b7f9758 | |
| | e5ffe132ae | |
@@ -5,9 +5,9 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )
 
 const (
@@ -34,34 +34,34 @@ type Client interface {
 	// Asynchronous requests
 	FlushAsync(context.Context) (*ReqRes, error)
 	EchoAsync(ctx context.Context, msg string) (*ReqRes, error)
-	InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error)
-	DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error)
-	CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error)
-	QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error)
+	InfoAsync(context.Context, abci.RequestInfo) (*ReqRes, error)
+	DeliverTxAsync(context.Context, abci.RequestDeliverTx) (*ReqRes, error)
+	CheckTxAsync(context.Context, abci.RequestCheckTx) (*ReqRes, error)
+	QueryAsync(context.Context, abci.RequestQuery) (*ReqRes, error)
 	CommitAsync(context.Context) (*ReqRes, error)
-	InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error)
-	BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error)
-	EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error)
-	ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error)
-	OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error)
-	LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error)
-	ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error)
+	InitChainAsync(context.Context, abci.RequestInitChain) (*ReqRes, error)
+	BeginBlockAsync(context.Context, abci.RequestBeginBlock) (*ReqRes, error)
+	EndBlockAsync(context.Context, abci.RequestEndBlock) (*ReqRes, error)
+	ListSnapshotsAsync(context.Context, abci.RequestListSnapshots) (*ReqRes, error)
+	OfferSnapshotAsync(context.Context, abci.RequestOfferSnapshot) (*ReqRes, error)
+	LoadSnapshotChunkAsync(context.Context, abci.RequestLoadSnapshotChunk) (*ReqRes, error)
+	ApplySnapshotChunkAsync(context.Context, abci.RequestApplySnapshotChunk) (*ReqRes, error)
 
 	// Synchronous requests
 	FlushSync(context.Context) error
-	EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error)
-	InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error)
-	DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error)
-	CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error)
-	QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error)
-	CommitSync(context.Context) (*types.ResponseCommit, error)
-	InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error)
-	BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error)
-	EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error)
-	ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error)
-	OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error)
-	LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error)
-	ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error)
+	EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error)
+	InfoSync(context.Context, abci.RequestInfo) (*abci.ResponseInfo, error)
+	DeliverTxSync(context.Context, abci.RequestDeliverTx) (*abci.ResponseDeliverTx, error)
+	CheckTxSync(context.Context, abci.RequestCheckTx) (*abci.ResponseCheckTx, error)
+	QuerySync(context.Context, abci.RequestQuery) (*abci.ResponseQuery, error)
+	CommitSync(context.Context) (*abci.ResponseCommit, error)
+	InitChainSync(context.Context, abci.RequestInitChain) (*abci.ResponseInitChain, error)
+	BeginBlockSync(context.Context, abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error)
+	EndBlockSync(context.Context, abci.RequestEndBlock) (*abci.ResponseEndBlock, error)
+	ListSnapshotsSync(context.Context, abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error)
+	OfferSnapshotSync(context.Context, abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error)
+	LoadSnapshotChunkSync(context.Context, abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error)
+	ApplySnapshotChunkSync(context.Context, abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error)
 }
 
 //----------------------------------------
@@ -80,19 +80,19 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err
 	return
 }
 
-type Callback func(*types.Request, *types.Response)
+type Callback func(*abci.Request, *abci.Response)
 
 type ReqRes struct {
-	*types.Request
+	*abci.Request
 	*sync.WaitGroup
-	*types.Response // Not set atomically, so be sure to use WaitGroup.
+	*abci.Response // Not set atomically, so be sure to use WaitGroup.
 
 	mtx  tmsync.RWMutex
-	done bool                  // Gets set to true once *after* WaitGroup.Done().
-	cb   func(*types.Response) // A single callback that may be set.
+	done bool                 // Gets set to true once *after* WaitGroup.Done().
+	cb   func(*abci.Response) // A single callback that may be set.
 }
 
-func NewReqRes(req *types.Request) *ReqRes {
+func NewReqRes(req *abci.Request) *ReqRes {
 	return &ReqRes{
 		Request:   req,
 		WaitGroup: waitGroup1(),
@@ -106,7 +106,7 @@ func NewReqRes(req *types.Request) *ReqRes {
 // Sets sets the callback. If reqRes is already done, it will call the cb
 // immediately. Note, reqRes.cb should not change if reqRes.done and only one
 // callback is supported.
-func (r *ReqRes) SetCallback(cb func(res *types.Response)) {
+func (r *ReqRes) SetCallback(cb func(res *abci.Response)) {
 	r.mtx.Lock()
 
 	if r.done {
@@ -136,7 +136,7 @@ func (r *ReqRes) InvokeCallback() {
 // will invoke the callback twice and create a potential race condition.
 //
 // ref: https://github.com/tendermint/tendermint/issues/5439
-func (r *ReqRes) GetCallback() func(*types.Response) {
+func (r *ReqRes) GetCallback() func(*abci.Response) {
 	r.mtx.RLock()
 	defer r.mtx.RUnlock()
 	return r.cb
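Note: the interface change above is purely a package move — every request/response type now comes from `pkg/abci` instead of `abci/types`. A minimal caller sketch, not part of this changeset, assuming the `socket` transport and the `NewClient(addr, transport, mustConnect)` signature shown in the hunk header above:

```go
// Sketch only, not part of this changeset.
package main

import (
	"context"
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	client, err := abcicli.NewClient("unix://test.sock", "socket", true)
	if err != nil {
		log.Fatal(err)
	}
	// Client embeds service.Service, so it must be started before use.
	if err := client.Start(); err != nil {
		log.Fatal(err)
	}
	defer func() { _ = client.Stop() }()

	// Synchronous call: blocks until the matching response arrives.
	res, err := client.CheckTxSync(context.Background(), abci.RequestCheckTx{Tx: []byte("tx")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CheckTx code:", res.Code)
}
```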
@@ -9,10 +9,10 @@ import (
 
 	"google.golang.org/grpc"
 
-	"github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )
 
 // A gRPC client.
@@ -20,14 +20,14 @@ type grpcClient struct {
 	service.BaseService
 	mustConnect bool
 
-	client   types.ABCIApplicationClient
+	client   abci.ABCIApplicationClient
 	conn     *grpc.ClientConn
 	chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool
 
 	mtx   tmsync.RWMutex
 	addr  string
 	err   error
-	resCb func(*types.Request, *types.Response) // listens to all callbacks
+	resCb func(*abci.Request, *abci.Response) // listens to all callbacks
 }
 
 var _ Client = (*grpcClient)(nil)
@@ -106,12 +106,12 @@ RETRY_LOOP:
 	}
 
 	cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr)
-	client := types.NewABCIApplicationClient(conn)
+	client := abci.NewABCIApplicationClient(conn)
 	cli.conn = conn
 
 ENSURE_CONNECTED:
 	for {
-		_, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
+		_, err := client.Echo(context.Background(), &abci.RequestEcho{Message: "hello"}, grpc.WaitForReady(true))
 		if err == nil {
 			break ENSURE_CONNECTED
 		}
@@ -166,143 +166,143 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) {
 
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
-	req := types.ToRequestEcho(msg)
+	req := abci.ToRequestEcho(msg)
 	res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Echo{Echo: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
-	req := types.ToRequestFlush()
+	req := abci.ToRequestFlush()
 	res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Flush{Flush: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) {
-	req := types.ToRequestInfo(params)
+func (cli *grpcClient) InfoAsync(ctx context.Context, params abci.RequestInfo) (*ReqRes, error) {
+	req := abci.ToRequestInfo(params)
 	res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Info{Info: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) {
-	req := types.ToRequestDeliverTx(params)
+func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params abci.RequestDeliverTx) (*ReqRes, error) {
+	req := abci.ToRequestDeliverTx(params)
 	res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_DeliverTx{DeliverTx: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) {
-	req := types.ToRequestCheckTx(params)
+func (cli *grpcClient) CheckTxAsync(ctx context.Context, params abci.RequestCheckTx) (*ReqRes, error) {
+	req := abci.ToRequestCheckTx(params)
 	res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_CheckTx{CheckTx: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) {
-	req := types.ToRequestQuery(params)
+func (cli *grpcClient) QueryAsync(ctx context.Context, params abci.RequestQuery) (*ReqRes, error) {
+	req := abci.ToRequestQuery(params)
 	res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Query{Query: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
-	req := types.ToRequestCommit()
+	req := abci.ToRequestCommit()
 	res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Commit{Commit: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) {
-	req := types.ToRequestInitChain(params)
+func (cli *grpcClient) InitChainAsync(ctx context.Context, params abci.RequestInitChain) (*ReqRes, error) {
+	req := abci.ToRequestInitChain(params)
 	res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_InitChain{InitChain: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) {
-	req := types.ToRequestBeginBlock(params)
+func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params abci.RequestBeginBlock) (*ReqRes, error) {
+	req := abci.ToRequestBeginBlock(params)
 	res, err := cli.client.BeginBlock(ctx, req.GetBeginBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_BeginBlock{BeginBlock: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) {
-	req := types.ToRequestEndBlock(params)
+func (cli *grpcClient) EndBlockAsync(ctx context.Context, params abci.RequestEndBlock) (*ReqRes, error) {
+	req := abci.ToRequestEndBlock(params)
 	res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_EndBlock{EndBlock: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) {
-	req := types.ToRequestListSnapshots(params)
+func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params abci.RequestListSnapshots) (*ReqRes, error) {
+	req := abci.ToRequestListSnapshots(params)
 	res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_ListSnapshots{ListSnapshots: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
-func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) {
-	req := types.ToRequestOfferSnapshot(params)
+func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params abci.RequestOfferSnapshot) (*ReqRes, error) {
+	req := abci.ToRequestOfferSnapshot(params)
 	res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_OfferSnapshot{OfferSnapshot: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) LoadSnapshotChunkAsync(
 	ctx context.Context,
-	params types.RequestLoadSnapshotChunk,
+	params abci.RequestLoadSnapshotChunk,
 ) (*ReqRes, error) {
-	req := types.ToRequestLoadSnapshotChunk(params)
+	req := abci.ToRequestLoadSnapshotChunk(params)
 	res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
 	}
-	return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
+	return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}})
 }
 
 // NOTE: call is synchronous, use ctx to break early if needed
 func (cli *grpcClient) ApplySnapshotChunkAsync(
 	ctx context.Context,
-	params types.RequestApplySnapshotChunk,
+	params abci.RequestApplySnapshotChunk,
 ) (*ReqRes, error) {
-	req := types.ToRequestApplySnapshotChunk(params)
+	req := abci.ToRequestApplySnapshotChunk(params)
 	res, err := cli.client.ApplySnapshotChunk(ctx, req.GetApplySnapshotChunk(), grpc.WaitForReady(true))
 	if err != nil {
 		return nil, err
@@ -310,13 +310,13 @@ func (cli *grpcClient) ApplySnapshotChunkAsync(
 	return cli.finishAsyncCall(
 		ctx,
 		req,
-		&types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
+		&abci.Response{Value: &abci.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}},
 	)
 }
 
 // finishAsyncCall creates a ReqRes for an async call, and immediately populates it
 // with the response. We don't complete it until it's been ordered via the channel.
-func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) {
+func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *abci.Request, res *abci.Response) (*ReqRes, error) {
 	reqres := NewReqRes(req)
 	reqres.Response = res
 	select {
@@ -330,7 +330,7 @@ func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request,
 // finishSyncCall waits for an async call to complete. It is necessary to call all
 // sync calls asynchronously as well, to maintain call and response ordering via
 // the channel, and this method will wait until the async call completes.
-func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
+func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *abci.Response {
 	// It's possible that the callback is called twice, since the callback can
 	// be called immediately on SetCallback() in addition to after it has been
 	// set. This is because completing the ReqRes happens in a separate critical
@@ -346,8 +346,8 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response {
 	// ReqRes should really handle callback dispatch internally, to guarantee
 	// that it's only called once and avoid the above race conditions.
 	var once sync.Once
-	ch := make(chan *types.Response, 1)
-	reqres.SetCallback(func(res *types.Response) {
+	ch := make(chan *abci.Response, 1)
+	reqres.SetCallback(func(res *abci.Response) {
 		once.Do(func() {
 			ch <- res
 		})
@@ -361,7 +361,7 @@ func (cli *grpcClient) FlushSync(ctx context.Context) error {
 	return nil
 }
 
-func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) {
 	reqres, err := cli.EchoAsync(ctx, msg)
 	if err != nil {
 		return nil, err
@@ -371,8 +371,8 @@ func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.Respons
 
 func (cli *grpcClient) InfoSync(
 	ctx context.Context,
-	req types.RequestInfo,
-) (*types.ResponseInfo, error) {
+	req abci.RequestInfo,
+) (*abci.ResponseInfo, error) {
 	reqres, err := cli.InfoAsync(ctx, req)
 	if err != nil {
 		return nil, err
@@ -382,8 +382,8 @@ func (cli *grpcClient) InfoSync(
 
 func (cli *grpcClient) DeliverTxSync(
 	ctx context.Context,
-	params types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
+	params abci.RequestDeliverTx,
+) (*abci.ResponseDeliverTx, error) {
 
 	reqres, err := cli.DeliverTxAsync(ctx, params)
 	if err != nil {
@@ -394,8 +394,8 @@ func (cli *grpcClient) DeliverTxSync(
 
 func (cli *grpcClient) CheckTxSync(
 	ctx context.Context,
-	params types.RequestCheckTx,
-) (*types.ResponseCheckTx, error) {
+	params abci.RequestCheckTx,
+) (*abci.ResponseCheckTx, error) {
 
 	reqres, err := cli.CheckTxAsync(ctx, params)
 	if err != nil {
@@ -406,8 +406,8 @@ func (cli *grpcClient) CheckTxSync(
 
 func (cli *grpcClient) QuerySync(
 	ctx context.Context,
-	req types.RequestQuery,
-) (*types.ResponseQuery, error) {
+	req abci.RequestQuery,
+) (*abci.ResponseQuery, error) {
 	reqres, err := cli.QueryAsync(ctx, req)
 	if err != nil {
 		return nil, err
@@ -415,7 +415,7 @@ func (cli *grpcClient) QuerySync(
 	return cli.finishSyncCall(reqres).GetQuery(), cli.Error()
 }
 
-func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
+func (cli *grpcClient) CommitSync(ctx context.Context) (*abci.ResponseCommit, error) {
 	reqres, err := cli.CommitAsync(ctx)
 	if err != nil {
 		return nil, err
@@ -425,8 +425,8 @@ func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, e
 
 func (cli *grpcClient) InitChainSync(
 	ctx context.Context,
-	params types.RequestInitChain,
-) (*types.ResponseInitChain, error) {
+	params abci.RequestInitChain,
+) (*abci.ResponseInitChain, error) {
 
 	reqres, err := cli.InitChainAsync(ctx, params)
 	if err != nil {
@@ -437,8 +437,8 @@ func (cli *grpcClient) InitChainSync(
 
 func (cli *grpcClient) BeginBlockSync(
 	ctx context.Context,
-	params types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
+	params abci.RequestBeginBlock,
+) (*abci.ResponseBeginBlock, error) {
 
 	reqres, err := cli.BeginBlockAsync(ctx, params)
 	if err != nil {
@@ -449,8 +449,8 @@ func (cli *grpcClient) BeginBlockSync(
 
 func (cli *grpcClient) EndBlockSync(
 	ctx context.Context,
-	params types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
+	params abci.RequestEndBlock,
+) (*abci.ResponseEndBlock, error) {
 
 	reqres, err := cli.EndBlockAsync(ctx, params)
 	if err != nil {
@@ -461,8 +461,8 @@ func (cli *grpcClient) EndBlockSync(
 
 func (cli *grpcClient) ListSnapshotsSync(
 	ctx context.Context,
-	params types.RequestListSnapshots,
-) (*types.ResponseListSnapshots, error) {
+	params abci.RequestListSnapshots,
+) (*abci.ResponseListSnapshots, error) {
 
 	reqres, err := cli.ListSnapshotsAsync(ctx, params)
 	if err != nil {
@@ -473,8 +473,8 @@ func (cli *grpcClient) ListSnapshotsSync(
 
 func (cli *grpcClient) OfferSnapshotSync(
 	ctx context.Context,
-	params types.RequestOfferSnapshot,
-) (*types.ResponseOfferSnapshot, error) {
+	params abci.RequestOfferSnapshot,
+) (*abci.ResponseOfferSnapshot, error) {
 
 	reqres, err := cli.OfferSnapshotAsync(ctx, params)
 	if err != nil {
@@ -485,7 +485,7 @@ func (cli *grpcClient) OfferSnapshotSync(
 
 func (cli *grpcClient) LoadSnapshotChunkSync(
 	ctx context.Context,
-	params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	params abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) {
 
 	reqres, err := cli.LoadSnapshotChunkAsync(ctx, params)
 	if err != nil {
@@ -496,7 +496,7 @@ func (cli *grpcClient) LoadSnapshotChunkSync(
 
 func (cli *grpcClient) ApplySnapshotChunkSync(
 	ctx context.Context,
-	params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	params abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) {
 
 	reqres, err := cli.ApplySnapshotChunkAsync(ctx, params)
 	if err != nil {
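Note: on the async path the gRPC client hands back a *ReqRes whose callback fires only after the response has been ordered through chReqRes. A sketch of consuming it with the same sync.Once guard that finishSyncCall uses above (assumes the imports from the previous sketch, plus "sync"):

```go
// Sketch only: mirrors the once-guarded callback pattern from finishSyncCall.
func watchCheckTx(ctx context.Context, client abcicli.Client, tx []byte) error {
	reqres, err := client.CheckTxAsync(ctx, abci.RequestCheckTx{Tx: tx})
	if err != nil {
		return err
	}
	var once sync.Once
	reqres.SetCallback(func(res *abci.Response) {
		// SetCallback may invoke this immediately if the ReqRes is already
		// done, and again via the ordering channel; sync.Once deduplicates.
		once.Do(func() {
			fmt.Println("CheckTx code:", res.GetCheckTx().Code)
		})
	})
	return nil
}
```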
@@ -3,9 +3,9 @@ package abcicli
 import (
 	"context"
 
-	types "github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/libs/service"
+	types "github.com/tendermint/tendermint/pkg/abci"
 )
 
 // NOTE: use defer to unlock mutex because Application might panic (e.g., in
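Note: the local client takes a different route than the other files — it imports the new package under the old `types` alias, so the rest of the file compiles without further edits:

```go
// Old: types "github.com/tendermint/tendermint/abci/types"
// New alias: every existing `types.` reference below keeps compiling as-is.
import (
	types "github.com/tendermint/tendermint/pkg/abci"
)
```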
@@ -3,15 +3,14 @@
 package mocks
 
 import (
-	context "context"
-
 	abcicli "github.com/tendermint/tendermint/abci/client"
+	abci "github.com/tendermint/tendermint/pkg/abci"
+
+	context "context"
 
 	log "github.com/tendermint/tendermint/libs/log"
 
 	mock "github.com/stretchr/testify/mock"
-
-	types "github.com/tendermint/tendermint/abci/types"
 )
 
 // Client is an autogenerated mock type for the Client type
@@ -20,11 +19,11 @@ type Client struct {
 }
 
 // ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
+func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 abci.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestApplySnapshotChunk) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -33,7 +32,7 @@ func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.Request
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestApplySnapshotChunk) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -43,20 +42,20 @@ func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.Request
 }
 
 // ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseApplySnapshotChunk
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok {
+	var r0 *abci.ResponseApplySnapshotChunk
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestApplySnapshotChunk) *abci.ResponseApplySnapshotChunk); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk)
+			r0 = ret.Get(0).(*abci.ResponseApplySnapshotChunk)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestApplySnapshotChunk) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -66,11 +65,11 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA
 }
 
 // BeginBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) {
+func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 abci.RequestBeginBlock) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestBeginBlock) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -79,7 +78,7 @@ func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlo
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestBeginBlock) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -89,20 +88,20 @@ func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlo
 }
 
 // BeginBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) {
+func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseBeginBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok {
+	var r0 *abci.ResponseBeginBlock
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestBeginBlock) *abci.ResponseBeginBlock); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseBeginBlock)
+			r0 = ret.Get(0).(*abci.ResponseBeginBlock)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestBeginBlock) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -112,11 +111,11 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc
 }
 
 // CheckTxAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) {
+func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -125,7 +124,7 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -135,20 +134,20 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*
 }
 
 // CheckTxSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) {
+func (_m *Client) CheckTxSync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abci.ResponseCheckTx, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseCheckTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok {
+	var r0 *abci.ResponseCheckTx
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abci.ResponseCheckTx); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseCheckTx)
+			r0 = ret.Get(0).(*abci.ResponseCheckTx)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -181,15 +180,15 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) {
 }
 
 // CommitSync provides a mock function with given fields: _a0
-func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) {
+func (_m *Client) CommitSync(_a0 context.Context) (*abci.ResponseCommit, error) {
 	ret := _m.Called(_a0)
 
-	var r0 *types.ResponseCommit
-	if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok {
+	var r0 *abci.ResponseCommit
+	if rf, ok := ret.Get(0).(func(context.Context) *abci.ResponseCommit); ok {
 		r0 = rf(_a0)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseCommit)
+			r0 = ret.Get(0).(*abci.ResponseCommit)
 		}
 	}
 
@@ -204,11 +203,11 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error)
 }
 
 // DeliverTxAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) {
+func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 abci.RequestDeliverTx) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestDeliverTx) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -217,7 +216,7 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestDeliverTx) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -227,20 +226,20 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx
 }
 
 // DeliverTxSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) {
+func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 abci.RequestDeliverTx) (*abci.ResponseDeliverTx, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseDeliverTx
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok {
+	var r0 *abci.ResponseDeliverTx
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestDeliverTx) *abci.ResponseDeliverTx); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseDeliverTx)
+			r0 = ret.Get(0).(*abci.ResponseDeliverTx)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestDeliverTx) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -273,15 +272,15 @@ func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, e
 }
 
 // EchoSync provides a mock function with given fields: ctx, msg
-func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
+func (_m *Client) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) {
 	ret := _m.Called(ctx, msg)
 
-	var r0 *types.ResponseEcho
-	if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok {
+	var r0 *abci.ResponseEcho
+	if rf, ok := ret.Get(0).(func(context.Context, string) *abci.ResponseEcho); ok {
 		r0 = rf(ctx, msg)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseEcho)
+			r0 = ret.Get(0).(*abci.ResponseEcho)
 		}
 	}
 
@@ -296,11 +295,11 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho
 }
 
 // EndBlockAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) (*abcicli.ReqRes, error) {
+func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 abci.RequestEndBlock) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestEndBlock) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -309,7 +308,7 @@ func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock)
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestEndBlock) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -319,20 +318,20 @@ func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock)
 }
 
 // EndBlockSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) {
+func (_m *Client) EndBlockSync(_a0 context.Context, _a1 abci.RequestEndBlock) (*abci.ResponseEndBlock, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseEndBlock
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok {
+	var r0 *abci.ResponseEndBlock
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestEndBlock) *abci.ResponseEndBlock); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseEndBlock)
+			r0 = ret.Get(0).(*abci.ResponseEndBlock)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestEndBlock) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -393,11 +392,11 @@ func (_m *Client) FlushSync(_a0 context.Context) error {
 }
 
 // InfoAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) {
+func (_m *Client) InfoAsync(_a0 context.Context, _a1 abci.RequestInfo) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInfo) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -406,7 +405,7 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInfo) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -416,20 +415,20 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl
 }
 
 // InfoSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) {
+func (_m *Client) InfoSync(_a0 context.Context, _a1 abci.RequestInfo) (*abci.ResponseInfo, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseInfo
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok {
+	var r0 *abci.ResponseInfo
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInfo) *abci.ResponseInfo); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseInfo)
+			r0 = ret.Get(0).(*abci.ResponseInfo)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInfo) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -439,11 +438,11 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R
 }
 
 // InitChainAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) {
+func (_m *Client) InitChainAsync(_a0 context.Context, _a1 abci.RequestInitChain) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInitChain) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -452,7 +451,7 @@ func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInitChain) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -462,20 +461,20 @@ func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain
 }
 
 // InitChainSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) {
+func (_m *Client) InitChainSync(_a0 context.Context, _a1 abci.RequestInitChain) (*abci.ResponseInitChain, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseInitChain
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok {
+	var r0 *abci.ResponseInitChain
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInitChain) *abci.ResponseInitChain); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseInitChain)
+			r0 = ret.Get(0).(*abci.ResponseInitChain)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInitChain) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -499,11 +498,11 @@ func (_m *Client) IsRunning() bool {
 }
 
 // ListSnapshotsAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) {
+func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 abci.RequestListSnapshots) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestListSnapshots) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -512,7 +511,7 @@ func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListS
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestListSnapshots) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -522,20 +521,20 @@ func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListS
 }
 
 // ListSnapshotsSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
+func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseListSnapshots
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok {
+	var r0 *abci.ResponseListSnapshots
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestListSnapshots) *abci.ResponseListSnapshots); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseListSnapshots)
+			r0 = ret.Get(0).(*abci.ResponseListSnapshots)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestListSnapshots) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -545,11 +544,11 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn
 }
 
 // LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
+func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 abci.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -558,7 +557,7 @@ func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestL
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestLoadSnapshotChunk) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -568,20 +567,20 @@ func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestL
 }
 
 // LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseLoadSnapshotChunk
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok {
+	var r0 *abci.ResponseLoadSnapshotChunk
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestLoadSnapshotChunk) *abci.ResponseLoadSnapshotChunk); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk)
+			r0 = ret.Get(0).(*abci.ResponseLoadSnapshotChunk)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestLoadSnapshotChunk) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -591,11 +590,11 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo
 }
 
 // OfferSnapshotAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
+func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 abci.RequestOfferSnapshot) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestOfferSnapshot) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -604,7 +603,7 @@ func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOffer
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestOfferSnapshot) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -614,20 +613,20 @@ func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOffer
 }
 
 // OfferSnapshotSync provides a mock function with given fields: _a0, _a1
-func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
+func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseOfferSnapshot
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok {
+	var r0 *abci.ResponseOfferSnapshot
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestOfferSnapshot) *abci.ResponseOfferSnapshot); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseOfferSnapshot)
+			r0 = ret.Get(0).(*abci.ResponseOfferSnapshot)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestOfferSnapshot) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -670,11 +669,11 @@ func (_m *Client) OnStop() {
 }
 
 // QueryAsync provides a mock function with given fields: _a0, _a1
-func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) {
+func (_m *Client) QueryAsync(_a0 context.Context, _a1 abci.RequestQuery) (*abcicli.ReqRes, error) {
 	ret := _m.Called(_a0, _a1)
 
 	var r0 *abcicli.ReqRes
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok {
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestQuery) *abcicli.ReqRes); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
@@ -683,7 +682,7 @@ func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abci
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestQuery) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
@@ -693,20 +692,20 @@ func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abci
 }
 
 // QuerySync provides a mock function with given fields: _a0, _a1
-func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) {
+func (_m *Client) QuerySync(_a0 context.Context, _a1 abci.RequestQuery) (*abci.ResponseQuery, error) {
 	ret := _m.Called(_a0, _a1)
 
-	var r0 *types.ResponseQuery
-	if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok {
+	var r0 *abci.ResponseQuery
+	if rf, ok := ret.Get(0).(func(context.Context, abci.RequestQuery) *abci.ResponseQuery); ok {
 		r0 = rf(_a0, _a1)
 	} else {
 		if ret.Get(0) != nil {
-			r0 = ret.Get(0).(*types.ResponseQuery)
+			r0 = ret.Get(0).(*abci.ResponseQuery)
 		}
 	}
 
 	var r1 error
-	if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok {
+	if rf, ok := ret.Get(1).(func(context.Context, abci.RequestQuery) error); ok {
 		r1 = rf(_a0, _a1)
 	} else {
 		r1 = ret.Error(1)
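Note: the mock keeps mockery's Called/Get/Error dispatch, so tests only need to return the new pkg/abci types. A hypothetical test sketch (the mocks import path is assumed, not shown in this diff):

```go
// Hypothetical test sketch; the mocks import path is an assumption.
package mocks_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/abci/client/mocks"
	"github.com/tendermint/tendermint/pkg/abci"
)

func TestCheckTxSyncMock(t *testing.T) {
	m := new(mocks.Client)
	// Return values are type-asserted by the generated code, so they must
	// be the new pkg/abci types, not abci/types.
	m.On("CheckTxSync", mock.Anything, mock.Anything).
		Return(&abci.ResponseCheckTx{Code: 0}, nil)

	res, err := m.CheckTxSync(context.Background(), abci.RequestCheckTx{Tx: []byte("tx")})
	require.NoError(t, err)
	require.EqualValues(t, 0, res.Code)
	m.AssertExpectations(t)
}
```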
@@ -11,11 +11,11 @@ import (
 	"reflect"
 	"time"

-	"github.com/tendermint/tendermint/abci/types"
 	tmsync "github.com/tendermint/tendermint/internal/libs/sync"
 	"github.com/tendermint/tendermint/internal/libs/timer"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

 const (
@@ -45,8 +45,8 @@ type socketClient struct {

 	mtx tmsync.RWMutex
 	err error
-	reqSent *list.List // list of requests sent, waiting for response
-	resCb   func(*types.Request, *types.Response) // called on all requests, if set.
+	reqSent *list.List // list of requests sent, waiting for response
+	resCb   func(*abci.Request, *abci.Response) // called on all requests, if set.
 }

 var _ Client = (*socketClient)(nil)
@@ -138,14 +138,14 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
			}

			cli.willSendReq(reqres.R)
-			err := types.WriteMessage(reqres.R.Request, w)
+			err := abci.WriteMessage(reqres.R.Request, w)
			if err != nil {
				cli.stopForError(fmt.Errorf("write to buffer: %w", err))
				return
			}

			// If it's a flush request, flush the current buffer.
-			if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok {
+			if _, ok := reqres.R.Request.Value.(*abci.Request_Flush); ok {
				err = w.Flush()
				if err != nil {
					cli.stopForError(fmt.Errorf("flush buffer: %w", err))
@@ -154,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
			}
		case <-cli.flushTimer.Ch: // flush queue
			select {
-			case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}:
+			case cli.reqQueue <- &reqResWithContext{R: NewReqRes(abci.ToRequestFlush()), C: context.Background()}:
			default:
				// Probably will fill the buffer, or retry later.
			}
@@ -167,8 +167,8 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) {
 func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
	r := bufio.NewReader(conn)
	for {
-		var res = &types.Response{}
-		err := types.ReadMessage(r, res)
+		var res = &abci.Response{}
+		err := abci.ReadMessage(r, res)
		if err != nil {
			cli.stopForError(fmt.Errorf("read message: %w", err))
			return
@@ -177,7 +177,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) {
		// cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res)

		switch r := res.Value.(type) {
-		case *types.Response_Exception: // app responded with error
+		case *abci.Response_Exception: // app responded with error
			// XXX After setting cli.err, release waiters (e.g. reqres.Done())
			cli.stopForError(errors.New(r.Exception.Error))
			return
@@ -197,7 +197,7 @@ func (cli *socketClient) willSendReq(reqres *ReqRes) {
	cli.reqSent.PushBack(reqres)
 }

-func (cli *socketClient) didRecvResponse(res *types.Response) error {
+func (cli *socketClient) didRecvResponse(res *abci.Response) error {
	cli.mtx.Lock()
	defer cli.mtx.Unlock()

@@ -234,71 +234,71 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error {
 //----------------------------------------

 func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg))
+	return cli.queueRequestAsync(ctx, abci.ToRequestEcho(msg))
 }

 func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestFlush())
+	return cli.queueRequestAsync(ctx, abci.ToRequestFlush())
 }

-func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestInfo(req))
+func (cli *socketClient) InfoAsync(ctx context.Context, req abci.RequestInfo) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestInfo(req))
 }

-func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req))
+func (cli *socketClient) DeliverTxAsync(ctx context.Context, req abci.RequestDeliverTx) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestDeliverTx(req))
 }

-func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req))
+func (cli *socketClient) CheckTxAsync(ctx context.Context, req abci.RequestCheckTx) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestCheckTx(req))
 }

-func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestQuery(req))
+func (cli *socketClient) QueryAsync(ctx context.Context, req abci.RequestQuery) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestQuery(req))
 }

 func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestCommit())
+	return cli.queueRequestAsync(ctx, abci.ToRequestCommit())
 }

-func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req))
+func (cli *socketClient) InitChainAsync(ctx context.Context, req abci.RequestInitChain) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestInitChain(req))
 }

-func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req))
+func (cli *socketClient) BeginBlockAsync(ctx context.Context, req abci.RequestBeginBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestBeginBlock(req))
 }

-func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req))
+func (cli *socketClient) EndBlockAsync(ctx context.Context, req abci.RequestEndBlock) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestEndBlock(req))
 }

-func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req types.RequestListSnapshots) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req))
+func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req abci.RequestListSnapshots) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestListSnapshots(req))
 }

-func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req))
+func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req abci.RequestOfferSnapshot) (*ReqRes, error) {
+	return cli.queueRequestAsync(ctx, abci.ToRequestOfferSnapshot(req))
 }

 func (cli *socketClient) LoadSnapshotChunkAsync(
	ctx context.Context,
-	req types.RequestLoadSnapshotChunk,
+	req abci.RequestLoadSnapshotChunk,
 ) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req))
+	return cli.queueRequestAsync(ctx, abci.ToRequestLoadSnapshotChunk(req))
 }

 func (cli *socketClient) ApplySnapshotChunkAsync(
	ctx context.Context,
-	req types.RequestApplySnapshotChunk,
+	req abci.RequestApplySnapshotChunk,
 ) (*ReqRes, error) {
-	return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req))
+	return cli.queueRequestAsync(ctx, abci.ToRequestApplySnapshotChunk(req))
 }

 //----------------------------------------

 func (cli *socketClient) FlushSync(ctx context.Context) error {
-	reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true)
+	reqRes, err := cli.queueRequest(ctx, abci.ToRequestFlush(), true)
	if err != nil {
		return queueErr(err)
	}
@@ -322,8 +322,8 @@ func (cli *socketClient) FlushSync(ctx context.Context) error {
	}
 }

-func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) {
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg))
+func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) {
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestEcho(msg))
	if err != nil {
		return nil, err
	}
@@ -332,9 +332,9 @@ func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.Respo

 func (cli *socketClient) InfoSync(
	ctx context.Context,
-	req types.RequestInfo,
-) (*types.ResponseInfo, error) {
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req))
+	req abci.RequestInfo,
+) (*abci.ResponseInfo, error) {
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestInfo(req))
	if err != nil {
		return nil, err
	}
@@ -343,10 +343,10 @@ func (cli *socketClient) InfoSync(

 func (cli *socketClient) DeliverTxSync(
	ctx context.Context,
-	req types.RequestDeliverTx,
-) (*types.ResponseDeliverTx, error) {
+	req abci.RequestDeliverTx,
+) (*abci.ResponseDeliverTx, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestDeliverTx(req))
	if err != nil {
		return nil, err
	}
@@ -355,9 +355,9 @@ func (cli *socketClient) DeliverTxSync(

 func (cli *socketClient) CheckTxSync(
	ctx context.Context,
-	req types.RequestCheckTx,
-) (*types.ResponseCheckTx, error) {
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req))
+	req abci.RequestCheckTx,
+) (*abci.ResponseCheckTx, error) {
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestCheckTx(req))
	if err != nil {
		return nil, err
	}
@@ -366,17 +366,17 @@ func (cli *socketClient) CheckTxSync(

 func (cli *socketClient) QuerySync(
	ctx context.Context,
-	req types.RequestQuery,
-) (*types.ResponseQuery, error) {
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req))
+	req abci.RequestQuery,
+) (*abci.ResponseQuery, error) {
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestQuery(req))
	if err != nil {
		return nil, err
	}
	return reqres.Response.GetQuery(), nil
 }

-func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) {
-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit())
+func (cli *socketClient) CommitSync(ctx context.Context) (*abci.ResponseCommit, error) {
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestCommit())
	if err != nil {
		return nil, err
	}
@@ -385,10 +385,10 @@ func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit,

 func (cli *socketClient) InitChainSync(
	ctx context.Context,
-	req types.RequestInitChain,
-) (*types.ResponseInitChain, error) {
+	req abci.RequestInitChain,
+) (*abci.ResponseInitChain, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestInitChain(req))
	if err != nil {
		return nil, err
	}
@@ -397,10 +397,10 @@ func (cli *socketClient) InitChainSync(

 func (cli *socketClient) BeginBlockSync(
	ctx context.Context,
-	req types.RequestBeginBlock,
-) (*types.ResponseBeginBlock, error) {
+	req abci.RequestBeginBlock,
+) (*abci.ResponseBeginBlock, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestBeginBlock(req))
	if err != nil {
		return nil, err
	}
@@ -409,10 +409,10 @@ func (cli *socketClient) BeginBlockSync(

 func (cli *socketClient) EndBlockSync(
	ctx context.Context,
-	req types.RequestEndBlock,
-) (*types.ResponseEndBlock, error) {
+	req abci.RequestEndBlock,
+) (*abci.ResponseEndBlock, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestEndBlock(req))
	if err != nil {
		return nil, err
	}
@@ -421,10 +421,10 @@ func (cli *socketClient) EndBlockSync(

 func (cli *socketClient) ListSnapshotsSync(
	ctx context.Context,
-	req types.RequestListSnapshots,
-) (*types.ResponseListSnapshots, error) {
+	req abci.RequestListSnapshots,
+) (*abci.ResponseListSnapshots, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestListSnapshots(req))
	if err != nil {
		return nil, err
	}
@@ -433,10 +433,10 @@ func (cli *socketClient) ListSnapshotsSync(

 func (cli *socketClient) OfferSnapshotSync(
	ctx context.Context,
-	req types.RequestOfferSnapshot,
-) (*types.ResponseOfferSnapshot, error) {
+	req abci.RequestOfferSnapshot,
+) (*abci.ResponseOfferSnapshot, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestOfferSnapshot(req))
	if err != nil {
		return nil, err
	}
@@ -445,9 +445,9 @@ func (cli *socketClient) OfferSnapshotSync(

 func (cli *socketClient) LoadSnapshotChunkSync(
	ctx context.Context,
-	req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
+	req abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestLoadSnapshotChunk(req))
	if err != nil {
		return nil, err
	}
@@ -456,9 +456,9 @@ func (cli *socketClient) LoadSnapshotChunkSync(

 func (cli *socketClient) ApplySnapshotChunkSync(
	ctx context.Context,
-	req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
+	req abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) {

-	reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req))
+	reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestApplySnapshotChunk(req))
	if err != nil {
		return nil, err
	}
@@ -475,7 +475,7 @@ func (cli *socketClient) ApplySnapshotChunkSync(
 // non-nil).
 //
 // The caller is responsible for checking cli.Error.
-func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) {
+func (cli *socketClient) queueRequest(ctx context.Context, req *abci.Request, sync bool) (*ReqRes, error) {
	reqres := NewReqRes(req)

	if sync {
@@ -494,7 +494,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s

	// Maybe auto-flush, or unset auto-flush
	switch req.Value.(type) {
-	case *types.Request_Flush:
+	case *abci.Request_Flush:
		cli.flushTimer.Unset()
	default:
		cli.flushTimer.Set()
@@ -505,7 +505,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s

 func (cli *socketClient) queueRequestAsync(
	ctx context.Context,
-	req *types.Request,
+	req *abci.Request,
 ) (*ReqRes, error) {

	reqres, err := cli.queueRequest(ctx, req, false)
@@ -518,7 +518,7 @@ func (cli *socketClient) queueRequestAsync(

 func (cli *socketClient) queueRequestAndFlushSync(
	ctx context.Context,
-	req *types.Request,
+	req *abci.Request,
 ) (*ReqRes, error) {

	reqres, err := cli.queueRequest(ctx, req, true)
@@ -561,36 +561,36 @@ LOOP:

 //----------------------------------------

-func resMatchesReq(req *types.Request, res *types.Response) (ok bool) {
+func resMatchesReq(req *abci.Request, res *abci.Response) (ok bool) {
	switch req.Value.(type) {
-	case *types.Request_Echo:
-		_, ok = res.Value.(*types.Response_Echo)
-	case *types.Request_Flush:
-		_, ok = res.Value.(*types.Response_Flush)
-	case *types.Request_Info:
-		_, ok = res.Value.(*types.Response_Info)
-	case *types.Request_DeliverTx:
-		_, ok = res.Value.(*types.Response_DeliverTx)
-	case *types.Request_CheckTx:
-		_, ok = res.Value.(*types.Response_CheckTx)
-	case *types.Request_Commit:
-		_, ok = res.Value.(*types.Response_Commit)
-	case *types.Request_Query:
-		_, ok = res.Value.(*types.Response_Query)
-	case *types.Request_InitChain:
-		_, ok = res.Value.(*types.Response_InitChain)
-	case *types.Request_BeginBlock:
-		_, ok = res.Value.(*types.Response_BeginBlock)
-	case *types.Request_EndBlock:
-		_, ok = res.Value.(*types.Response_EndBlock)
-	case *types.Request_ApplySnapshotChunk:
-		_, ok = res.Value.(*types.Response_ApplySnapshotChunk)
-	case *types.Request_LoadSnapshotChunk:
-		_, ok = res.Value.(*types.Response_LoadSnapshotChunk)
-	case *types.Request_ListSnapshots:
-		_, ok = res.Value.(*types.Response_ListSnapshots)
-	case *types.Request_OfferSnapshot:
-		_, ok = res.Value.(*types.Response_OfferSnapshot)
+	case *abci.Request_Echo:
+		_, ok = res.Value.(*abci.Response_Echo)
+	case *abci.Request_Flush:
+		_, ok = res.Value.(*abci.Response_Flush)
+	case *abci.Request_Info:
+		_, ok = res.Value.(*abci.Response_Info)
+	case *abci.Request_DeliverTx:
+		_, ok = res.Value.(*abci.Response_DeliverTx)
+	case *abci.Request_CheckTx:
+		_, ok = res.Value.(*abci.Response_CheckTx)
+	case *abci.Request_Commit:
+		_, ok = res.Value.(*abci.Response_Commit)
+	case *abci.Request_Query:
+		_, ok = res.Value.(*abci.Response_Query)
+	case *abci.Request_InitChain:
+		_, ok = res.Value.(*abci.Response_InitChain)
+	case *abci.Request_BeginBlock:
+		_, ok = res.Value.(*abci.Response_BeginBlock)
+	case *abci.Request_EndBlock:
+		_, ok = res.Value.(*abci.Response_EndBlock)
+	case *abci.Request_ApplySnapshotChunk:
+		_, ok = res.Value.(*abci.Response_ApplySnapshotChunk)
+	case *abci.Request_LoadSnapshotChunk:
+		_, ok = res.Value.(*abci.Response_LoadSnapshotChunk)
+	case *abci.Request_ListSnapshots:
+		_, ok = res.Value.(*abci.Response_ListSnapshots)
+	case *abci.Request_OfferSnapshot:
+		_, ok = res.Value.(*abci.Response_OfferSnapshot)
	}
	return ok
 }
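For callers of the socket client, only the request and response types move; the queue-flush-wait flow is untouched. A minimal sketch of a synchronous round trip, assuming the existing `abcicli.NewSocketClient` constructor and an ABCI server already listening on the address below:

```go
package main

import (
	"context"
	"fmt"
	"log"

	abcicli "github.com/tendermint/tendermint/abci/client"
	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	// mustConnect=true makes Start fail fast when no server is listening.
	cli := abcicli.NewSocketClient("tcp://127.0.0.1:26658", true)
	if err := cli.Start(); err != nil {
		log.Fatal(err)
	}
	defer cli.Stop() //nolint:errcheck

	// CheckTxSync queues the request, flushes the connection, and blocks
	// until the matching CheckTx response arrives.
	res, err := cli.CheckTxSync(context.Background(), abci.RequestCheckTx{Tx: []byte("key=value")})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("CheckTx code:", res.Code)
}
```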
@@ -13,8 +13,8 @@ import (

 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/server"
-	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

 var ctx = context.Background()
@@ -37,7 +37,7 @@ func TestProperSyncCalls(t *testing.T) {
	resp := make(chan error, 1)
	go func() {
		// This is BeginBlockSync unrolled....
-		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, abci.RequestBeginBlock{})
		assert.NoError(t, err)
		err = c.FlushSync(context.Background())
		assert.NoError(t, err)
@@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) {
	resp := make(chan error, 1)
	go func() {
		// Start BeginBlock and flush it
-		reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{})
+		reqres, err := c.BeginBlockAsync(ctx, abci.RequestBeginBlock{})
		assert.NoError(t, err)
		flush, err := c.FlushAsync(ctx)
		assert.NoError(t, err)
@@ -99,7 +99,7 @@ func TestHangingSyncCalls(t *testing.T) {
	}
 }

-func setupClientServer(t *testing.T, app types.Application) (
+func setupClientServer(t *testing.T, app abci.Application) (
	service.Service, abcicli.Client) {
	// some port between 20k and 30k
	port := 20000 + rand.Int31()%10000
@@ -118,10 +118,10 @@ func setupClientServer(t *testing.T, app types.Application) (
 }

 type slowApp struct {
-	types.BaseApplication
+	abci.BaseApplication
 }

-func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+func (slowApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
	time.Sleep(200 * time.Millisecond)
-	return types.ResponseBeginBlock{}
+	return abci.ResponseBeginBlock{}
 }
@@ -20,8 +20,8 @@ import (
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	"github.com/tendermint/tendermint/abci/server"
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
-	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/abci/version"
+	"github.com/tendermint/tendermint/pkg/abci"
 	"github.com/tendermint/tendermint/proto/tendermint/crypto"
 )

@@ -459,7 +459,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error {
	if len(args) == 1 {
		version = args[0]
	}
-	res, err := client.InfoSync(ctx, types.RequestInfo{Version: version})
+	res, err := client.InfoSync(ctx, abci.RequestInfo{Version: version})
	if err != nil {
		return err
	}
@@ -484,7 +484,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error {
	if err != nil {
		return err
	}
-	res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
+	res, err := client.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
	if err != nil {
		return err
	}
@@ -510,7 +510,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error {
	if err != nil {
		return err
	}
-	res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
+	res, err := client.CheckTxSync(ctx, abci.RequestCheckTx{Tx: txBytes})
	if err != nil {
		return err
	}
@@ -550,7 +550,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error {
		return err
	}

-	resQuery, err := client.QuerySync(ctx, types.RequestQuery{
+	resQuery, err := client.QuerySync(ctx, abci.RequestQuery{
		Data: queryBytes,
		Path: flagPath,
		Height: int64(flagHeight),
@@ -577,7 +577,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error {
	logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false)

	// Create the application - in memory or persisted to disk
-	var app types.Application
+	var app abci.Application
	if flagPersist == "" {
		app = kvstore.NewApplication()
	} else {
@@ -616,7 +616,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) {
	}

	// Always print the status code.
-	if rsp.Code == types.CodeTypeOK {
+	if rsp.Code == abci.CodeTypeOK {
		fmt.Printf("-> code: OK\n")
	} else {
		fmt.Printf("-> code: %d\n", rsp.Code)
@@ -21,7 +21,7 @@ import (
 	"github.com/tendermint/tendermint/abci/example/code"
 	"github.com/tendermint/tendermint/abci/example/kvstore"
 	abciserver "github.com/tendermint/tendermint/abci/server"
-	"github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

 func init() {
@@ -35,15 +35,15 @@ func TestKVStore(t *testing.T) {

 func TestBaseApp(t *testing.T) {
	fmt.Println("### Testing BaseApp")
-	testStream(t, types.NewBaseApplication())
+	testStream(t, abci.NewBaseApplication())
 }

 func TestGRPC(t *testing.T) {
	fmt.Println("### Testing GRPC")
-	testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication()))
+	testGRPCSync(t, abci.NewGRPCApplication(abci.NewBaseApplication()))
 }

-func testStream(t *testing.T, app types.Application) {
+func testStream(t *testing.T, app abci.Application) {
	const numDeliverTxs = 20000
	socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
@@ -73,10 +73,10 @@ func testStream(t *testing.T, app types.Application) {

	done := make(chan struct{})
	counter := 0
-	client.SetResponseCallback(func(req *types.Request, res *types.Response) {
+	client.SetResponseCallback(func(req *abci.Request, res *abci.Response) {
		// Process response
		switch r := res.Value.(type) {
-		case *types.Response_DeliverTx:
+		case *abci.Response_DeliverTx:
			counter++
			if r.DeliverTx.Code != code.CodeTypeOK {
				t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code)
@@ -91,7 +91,7 @@ func testStream(t *testing.T, app types.Application) {
				}()
				return
			}
-		case *types.Response_Flush:
+		case *abci.Response_Flush:
			// ignore
		default:
			t.Error("Unexpected response type", reflect.TypeOf(res.Value))
@@ -103,7 +103,7 @@ func testStream(t *testing.T, app types.Application) {
	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
-		_, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")})
+		_, err = client.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: []byte("test")})
		require.NoError(t, err)

		// Sometimes send flush messages
@@ -127,7 +127,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) {
	return tmnet.Connect(addr)
 }

-func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
+func testGRPCSync(t *testing.T, app abci.ABCIApplicationServer) {
	numDeliverTxs := 2000
	socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30))
	defer os.Remove(socketFile)
@@ -158,12 +158,12 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) {
		}
	})

-	client := types.NewABCIApplicationClient(conn)
+	client := abci.NewABCIApplicationClient(conn)

	// Write requests
	for counter := 0; counter < numDeliverTxs; counter++ {
		// Send request
-		response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")})
+		response, err := client.DeliverTx(context.Background(), &abci.RequestDeliverTx{Tx: []byte("test")})
		if err != nil {
			t.Fatalf("Error in GRPC DeliverTx: %v", err.Error())
		}
@@ -3,17 +3,17 @@ package kvstore
 import (
 	mrand "math/rand"

-	"github.com/tendermint/tendermint/abci/types"
 	tmrand "github.com/tendermint/tendermint/libs/rand"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

 // RandVal creates one random validator, with a key derived
 // from the input value
-func RandVal(i int) types.ValidatorUpdate {
+func RandVal(i int) abci.ValidatorUpdate {
	pubkey := tmrand.Bytes(32)
	// Random value between [0, 2^16 - 1]
	power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator
-	v := types.UpdateValidator(pubkey, int64(power), "")
+	v := abci.UpdateValidator(pubkey, int64(power), "")
	return v
 }

@@ -21,8 +21,8 @@ func RandVal(i int) types.ValidatorUpdate {
 // the application. Note that the keys are deterministically
 // derived from the index in the array, while the power is
 // random (Change this if not desired)
-func RandVals(cnt int) []types.ValidatorUpdate {
-	res := make([]types.ValidatorUpdate, cnt)
+func RandVals(cnt int) []abci.ValidatorUpdate {
+	res := make([]abci.ValidatorUpdate, cnt)
	for i := 0; i < cnt; i++ {
		res[i] = RandVal(i)
	}
@@ -33,7 +33,7 @@ func RandVals(cnt int) []types.ValidatorUpdate {
 // which allows tests to pass and is fine as long as you
 // don't make any tx that modify the validator state
 func InitKVStore(app *PersistentKVStoreApplication) {
-	app.InitChain(types.RequestInitChain{
+	app.InitChain(abci.RequestInitChain{
		Validators: RandVals(1),
	})
 }
@@ -9,7 +9,7 @@ import (
 	dbm "github.com/tendermint/tm-db"

 	"github.com/tendermint/tendermint/abci/example/code"
-	"github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/pkg/abci"
 	"github.com/tendermint/tendermint/version"
 )

@@ -61,10 +61,10 @@ func prefixKey(key []byte) []byte {

 //---------------------------------------------------

-var _ types.Application = (*Application)(nil)
+var _ abci.Application = (*Application)(nil)

 type Application struct {
-	types.BaseApplication
+	abci.BaseApplication

	state State
	RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight)
@@ -75,8 +75,8 @@ func NewApplication() *Application {
	return &Application{state: state}
 }

-func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
-	return types.ResponseInfo{
+func (app *Application) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) {
+	return abci.ResponseInfo{
		Data: fmt.Sprintf("{\"size\":%v}", app.state.Size),
		Version: version.ABCIVersion,
		AppVersion: ProtocolVersion,
@@ -86,7 +86,7 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo)
 }

 // tx is either "key=value" or just arbitrary bytes
-func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	var key, value string

	parts := bytes.Split(req.Tx, []byte("="))
@@ -102,10 +102,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
	}
	app.state.Size++

-	events := []types.Event{
+	events := []abci.Event{
		{
			Type: "app",
-			Attributes: []types.EventAttribute{
+			Attributes: []abci.EventAttribute{
				{Key: "creator", Value: "Cosmoshi Netowoko", Index: true},
				{Key: "key", Value: key, Index: true},
				{Key: "index_key", Value: "index is working", Index: true},
@@ -114,14 +114,14 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli
		},
	}

-	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
+	return abci.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events}
 }

-func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
-	return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
+func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
+	return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1}
 }

-func (app *Application) Commit() types.ResponseCommit {
+func (app *Application) Commit() abci.ResponseCommit {
	// Using a memdb - just return the big endian size of the db
	appHash := make([]byte, 8)
	binary.PutVarint(appHash, app.state.Size)
@@ -129,7 +129,7 @@ func (app *Application) Commit() types.ResponseCommit {
	app.state.Height++
	saveState(app.state)

-	resp := types.ResponseCommit{Data: appHash}
+	resp := abci.ResponseCommit{Data: appHash}
	if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks {
		resp.RetainHeight = app.state.Height - app.RetainBlocks + 1
	}
@@ -137,7 +137,7 @@ func (app *Application) Commit() types.ResponseCommit {
 }

 // Returns an associated value or nil if missing.
-func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+func (app *Application) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
	if reqQuery.Prove {
		value, err := app.state.db.Get(prefixKey(reqQuery.Data))
		if err != nil {
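Driving the example application in-process changes only in the package qualifier. A minimal sketch of the DeliverTx → Commit → Query cycle under the renamed import:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	app := kvstore.NewApplication()

	// "key=value" transactions are split on '=' and stored under the key.
	res := app.DeliverTx(abci.RequestDeliverTx{Tx: []byte("name=satoshi")})
	fmt.Println("deliver code:", res.Code)

	// Commit bumps the height and returns the new app hash.
	commit := app.Commit()
	fmt.Printf("app hash: %x\n", commit.Data)

	q := app.Query(abci.RequestQuery{Path: "/store", Data: []byte("name")})
	fmt.Printf("query: %s -> %s\n", q.Key, q.Value)
}
```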
@@ -15,7 +15,7 @@ import (
 	abcicli "github.com/tendermint/tendermint/abci/client"
 	"github.com/tendermint/tendermint/abci/example/code"
 	abciserver "github.com/tendermint/tendermint/abci/server"
-	"github.com/tendermint/tendermint/abci/types"
+	"github.com/tendermint/tendermint/pkg/abci"
 	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
 )

@@ -26,8 +26,8 @@ const (

 var ctx = context.Background()

-func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) {
-	req := types.RequestDeliverTx{Tx: tx}
+func testKVStore(t *testing.T, app abci.Application, tx []byte, key, value string) {
+	req := abci.RequestDeliverTx{Tx: tx}
	ar := app.DeliverTx(req)
	require.False(t, ar.IsErr(), ar)
	// repeating tx doesn't raise error
@@ -36,11 +36,11 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri
	// commit
	app.Commit()

-	info := app.Info(types.RequestInfo{})
+	info := app.Info(abci.RequestInfo{})
	require.NotZero(t, info.LastBlockHeight)

	// make sure query is fine
-	resQuery := app.Query(types.RequestQuery{
+	resQuery := app.Query(abci.RequestQuery{
		Path: "/store",
		Data: []byte(key),
	})
@@ -50,7 +50,7 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

	// make sure proof is fine
-	resQuery = app.Query(types.RequestQuery{
+	resQuery = app.Query(abci.RequestQuery{
		Path: "/store",
		Data: []byte(key),
		Prove: true,
@@ -98,7 +98,7 @@ func TestPersistentKVStoreInfo(t *testing.T) {
	InitKVStore(kvstore)
	height := int64(0)

-	resInfo := kvstore.Info(types.RequestInfo{})
+	resInfo := kvstore.Info(abci.RequestInfo{})
	if resInfo.LastBlockHeight != height {
		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
	}
@@ -109,11 +109,11 @@ func TestPersistentKVStoreInfo(t *testing.T) {
	header := tmproto.Header{
		Height: height,
	}
-	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
-	kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
+	kvstore.BeginBlock(abci.RequestBeginBlock{Hash: hash, Header: header})
+	kvstore.EndBlock(abci.RequestEndBlock{Height: header.Height})
	kvstore.Commit()

-	resInfo = kvstore.Info(types.RequestInfo{})
+	resInfo = kvstore.Info(abci.RequestInfo{})
	if resInfo.LastBlockHeight != height {
		t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight)
	}
@@ -133,18 +133,18 @@ func TestValUpdates(t *testing.T) {
	nInit := 5
	vals := RandVals(total)
	// initialize with the first nInit
-	kvstore.InitChain(types.RequestInitChain{
+	kvstore.InitChain(abci.RequestInitChain{
		Validators: vals[:nInit],
	})

	vals1, vals2 := vals[:nInit], kvstore.Validators()
	valsEqual(t, vals1, vals2)

-	var v1, v2, v3 types.ValidatorUpdate
+	var v1, v2, v3 abci.ValidatorUpdate

	// add some validators
	v1, v2 = vals[nInit], vals[nInit+1]
-	diff := []types.ValidatorUpdate{v1, v2}
+	diff := []abci.ValidatorUpdate{v1, v2}
	tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power)
	tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power)

@@ -158,7 +158,7 @@ func TestValUpdates(t *testing.T) {
	v1.Power = 0
	v2.Power = 0
	v3.Power = 0
-	diff = []types.ValidatorUpdate{v1, v2, v3}
+	diff = []abci.ValidatorUpdate{v1, v2, v3}
	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)
	tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power)
	tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power)
@@ -176,12 +176,12 @@ func TestValUpdates(t *testing.T) {
	} else {
		v1.Power = 5
	}
-	diff = []types.ValidatorUpdate{v1}
+	diff = []abci.ValidatorUpdate{v1}
	tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power)

	makeApplyBlock(t, kvstore, 3, diff, tx1)

-	vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...)
+	vals1 = append([]abci.ValidatorUpdate{v1}, vals1[1:]...)
	vals2 = kvstore.Validators()
	valsEqual(t, vals1, vals2)

@@ -189,9 +189,9 @@ func TestValUpdates(t *testing.T) {

 func makeApplyBlock(
	t *testing.T,
-	kvstore types.Application,
+	kvstore abci.Application,
	heightInt int,
-	diff []types.ValidatorUpdate,
+	diff []abci.ValidatorUpdate,
	txs ...[]byte) {
	// make and apply block
	height := int64(heightInt)
@@ -200,13 +200,13 @@ func makeApplyBlock(
		Height: height,
	}

-	kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header})
+	kvstore.BeginBlock(abci.RequestBeginBlock{Hash: hash, Header: header})
	for _, tx := range txs {
-		if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() {
+		if r := kvstore.DeliverTx(abci.RequestDeliverTx{Tx: tx}); r.IsErr() {
			t.Fatal(r)
		}
	}
-	resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height})
+	resEndBlock := kvstore.EndBlock(abci.RequestEndBlock{Height: header.Height})
	kvstore.Commit()

	valsEqual(t, diff, resEndBlock.ValidatorUpdates)
@@ -214,12 +214,12 @@ func makeApplyBlock(
 }

 // order doesn't matter
-func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
+func valsEqual(t *testing.T, vals1, vals2 []abci.ValidatorUpdate) {
	if len(vals1) != len(vals2) {
		t.Fatalf("vals dont match in len. got %d, expected %d", len(vals2), len(vals1))
	}
-	sort.Sort(types.ValidatorUpdates(vals1))
-	sort.Sort(types.ValidatorUpdates(vals2))
+	sort.Sort(abci.ValidatorUpdates(vals1))
+	sort.Sort(abci.ValidatorUpdates(vals2))
	for i, v1 := range vals1 {
		v2 := vals2[i]
		if !v1.PubKey.Equal(v2.PubKey) ||
@@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) {
	}
 }

-func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
+func makeSocketClientServer(app abci.Application, name string) (abcicli.Client, service.Service, error) {
	// Start the listener
	socket := fmt.Sprintf("unix://%s.sock", name)
	logger := log.TestingLogger()
@@ -253,12 +253,12 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client,
	return client, server, nil
 }

-func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) {
+func makeGRPCClientServer(app abci.Application, name string) (abcicli.Client, service.Service, error) {
	// Start the listener
	socket := fmt.Sprintf("unix://%s.sock", name)
	logger := log.TestingLogger()

-	gapp := types.NewGRPCApplication(app)
+	gapp := abci.NewGRPCApplication(app)
	server := abciserver.NewGRPCServer(socket, gapp)
	server.SetLogger(logger.With("module", "abci-server"))
	if err := server.Start(); err != nil {
@@ -326,23 +326,23 @@ func runClientTests(t *testing.T, client abcicli.Client) {
 }

 func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) {
-	ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
+	ar, err := app.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: tx})
	require.NoError(t, err)
	require.False(t, ar.IsErr(), ar)
	// repeating tx doesn't raise error
-	ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx})
+	ar, err = app.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: tx})
	require.NoError(t, err)
	require.False(t, ar.IsErr(), ar)
	// commit
	_, err = app.CommitSync(ctx)
	require.NoError(t, err)

-	info, err := app.InfoSync(ctx, types.RequestInfo{})
+	info, err := app.InfoSync(ctx, abci.RequestInfo{})
	require.NoError(t, err)
	require.NotZero(t, info.LastBlockHeight)

	// make sure query is fine
-	resQuery, err := app.QuerySync(ctx, types.RequestQuery{
+	resQuery, err := app.QuerySync(ctx, abci.RequestQuery{
		Path: "/store",
		Data: []byte(key),
	})
@@ -353,7 +353,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string)
	require.EqualValues(t, info.LastBlockHeight, resQuery.Height)

	// make sure proof is fine
-	resQuery, err = app.QuerySync(ctx, types.RequestQuery{
+	resQuery, err = app.QuerySync(ctx, abci.RequestQuery{
		Path: "/store",
		Data: []byte(key),
		Prove: true,
@@ -10,9 +10,9 @@ import (
 	dbm "github.com/tendermint/tm-db"

 	"github.com/tendermint/tendermint/abci/example/code"
-	"github.com/tendermint/tendermint/abci/types"
 	cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
 	"github.com/tendermint/tendermint/libs/log"
+	"github.com/tendermint/tendermint/pkg/abci"
 	pc "github.com/tendermint/tendermint/proto/tendermint/crypto"
 )

@@ -22,13 +22,13 @@ const (

 //-----------------------------------------

-var _ types.Application = (*PersistentKVStoreApplication)(nil)
+var _ abci.Application = (*PersistentKVStoreApplication)(nil)

 type PersistentKVStoreApplication struct {
	app *Application

	// validator set
-	ValUpdates []types.ValidatorUpdate
+	ValUpdates []abci.ValidatorUpdate

	valAddrToPubKeyMap map[string]pc.PublicKey

@@ -59,7 +59,7 @@ func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) {
	app.logger = l
 }

-func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo {
+func (app *PersistentKVStoreApplication) Info(req abci.RequestInfo) abci.ResponseInfo {
	res := app.app.Info(req)
	res.LastBlockHeight = app.app.state.Height
	res.LastBlockAppHash = app.app.state.AppHash
@@ -67,7 +67,7 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo
 }

 // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes
-func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx {
	// if it starts with "val:", update the validator set
	// format is "val:pubkey!power"
	if isValidatorTx(req.Tx) {
@@ -80,18 +80,18 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t
	return app.app.DeliverTx(req)
 }

-func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx {
+func (app *PersistentKVStoreApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx {
	return app.app.CheckTx(req)
 }

 // Commit will panic if InitChain was not called
-func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit {
+func (app *PersistentKVStoreApplication) Commit() abci.ResponseCommit {
	return app.app.Commit()
 }

-// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded.
+// When path=/val and data={validator address}, returns the validator update (abci.ValidatorUpdate) varint encoded.
 // For any other path, returns an associated value or nil if missing.
-func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
+func (app *PersistentKVStoreApplication) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) {
	switch reqQuery.Path {
	case "/val":
		key := []byte("val:" + string(reqQuery.Data))
@@ -109,27 +109,27 @@ func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (res
 }

 // Save the validators in the merkle tree
-func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain {
+func (app *PersistentKVStoreApplication) InitChain(req abci.RequestInitChain) abci.ResponseInitChain {
	for _, v := range req.Validators {
		r := app.updateValidator(v)
		if r.IsErr() {
			app.logger.Error("Error updating validators", "r", r)
		}
	}
-	return types.ResponseInitChain{}
+	return abci.ResponseInitChain{}
 }

 // Track the block hash and header information
-func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock {
+func (app *PersistentKVStoreApplication) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock {
	// reset valset changes
-	app.ValUpdates = make([]types.ValidatorUpdate, 0)
+	app.ValUpdates = make([]abci.ValidatorUpdate, 0)

	// Punish validators who committed equivocation.
	for _, ev := range req.ByzantineValidators {
-		if ev.Type == types.EvidenceType_DUPLICATE_VOTE {
+		if ev.Type == abci.EvidenceType_DUPLICATE_VOTE {
			addr := string(ev.Validator.Address)
			if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok {
-				app.updateValidator(types.ValidatorUpdate{
+				app.updateValidator(abci.ValidatorUpdate{
					PubKey: pubKey,
					Power: ev.Validator.Power - 1,
				})
@@ -142,46 +142,46 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock)
		}
	}

-	return types.ResponseBeginBlock{}
+	return abci.ResponseBeginBlock{}
 }

 // Update the validator set
-func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
-	return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
+func (app *PersistentKVStoreApplication) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock {
+	return abci.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
 }

 func (app *PersistentKVStoreApplication) ListSnapshots(
-	req types.RequestListSnapshots) types.ResponseListSnapshots {
-	return types.ResponseListSnapshots{}
+	req abci.RequestListSnapshots) abci.ResponseListSnapshots {
+	return abci.ResponseListSnapshots{}
 }

 func (app *PersistentKVStoreApplication) LoadSnapshotChunk(
-	req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk {
-	return types.ResponseLoadSnapshotChunk{}
+	req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
+	return abci.ResponseLoadSnapshotChunk{}
 }

 func (app *PersistentKVStoreApplication) OfferSnapshot(
-	req types.RequestOfferSnapshot) types.ResponseOfferSnapshot {
-	return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT}
+	req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
+	return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT}
 }

 func (app *PersistentKVStoreApplication) ApplySnapshotChunk(
-	req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk {
-	return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT}
+	req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
+	return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT}
 }

 //---------------------------------------------
 // update validators

-func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) {
+func (app *PersistentKVStoreApplication) Validators() (validators []abci.ValidatorUpdate) {
	itr, err := app.app.state.db.Iterator(nil, nil)
	if err != nil {
		panic(err)
	}
	for ; itr.Valid(); itr.Next() {
		if isValidatorTx(itr.Key()) {
-			validator := new(types.ValidatorUpdate)
-			err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
+			validator := new(abci.ValidatorUpdate)
+			err := abci.ReadMessage(bytes.NewBuffer(itr.Value()), validator)
			if err != nil {
				panic(err)
			}
@@ -209,13 +209,13 @@ func isValidatorTx(tx []byte) bool {

 // format is "val:pubkey!power"
 // pubkey is a base64-encoded 32-byte ed25519 key
-func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) abci.ResponseDeliverTx {
	tx = tx[len(ValidatorSetChangePrefix):]

	// get the pubkey and power
	pubKeyAndPower := strings.Split(string(tx), "!")
	if len(pubKeyAndPower) != 2 {
-		return types.ResponseDeliverTx{
+		return abci.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log: fmt.Sprintf("Expected 'pubkey!power'. Got %v", pubKeyAndPower)}
	}
@@ -224,7 +224,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
	// decode the pubkey
	pubkey, err := base64.StdEncoding.DecodeString(pubkeyS)
	if err != nil {
-		return types.ResponseDeliverTx{
+		return abci.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)}
	}
@@ -232,17 +232,17 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon
	// decode the power
	power, err := strconv.ParseInt(powerS, 10, 64)
	if err != nil {
-		return types.ResponseDeliverTx{
+		return abci.ResponseDeliverTx{
			Code: code.CodeTypeEncodingError,
			Log: fmt.Sprintf("Power (%s) is not an int", powerS)}
	}

	// update
-	return app.updateValidator(types.UpdateValidator(pubkey, power, ""))
+	return app.updateValidator(abci.UpdateValidator(pubkey, power, ""))
 }

 // add, update, or remove a validator
-func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx {
+func (app *PersistentKVStoreApplication) updateValidator(v abci.ValidatorUpdate) abci.ResponseDeliverTx {
	pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey)
	if err != nil {
		panic(fmt.Errorf("can't decode public key: %w", err))
@@ -257,7 +257,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
	}
	if !hasKey {
		pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes())
-		return types.ResponseDeliverTx{
+		return abci.ResponseDeliverTx{
			Code: code.CodeTypeUnauthorized,
			Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)}
	}
@@ -268,8 +268,8 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
	} else {
		// add or update validator
		value := bytes.NewBuffer(make([]byte, 0))
-		if err := types.WriteMessage(&v, value); err != nil {
-			return types.ResponseDeliverTx{
+		if err := abci.WriteMessage(&v, value); err != nil {
+			return abci.ResponseDeliverTx{
				Code: code.CodeTypeEncodingError,
				Log: fmt.Sprintf("Error encoding validator: %v", err)}
		}
@@ -282,5 +282,5 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate
	// we only update the changes array if we successfully updated the tree
	app.ValUpdates = append(app.ValUpdates, v)

-	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
+	return abci.ResponseDeliverTx{Code: code.CodeTypeOK}
 }
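The persistent application's validator path accepts transactions of the form `val:pubkey!power`, with a base64-encoded pubkey. A minimal sketch of adding a validator through that path, reusing the RandVal and MakeValSetChangeTx helpers referenced in the test files of this diff (the constructor name and the temp directory are assumptions):

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	app := kvstore.NewPersistentKVStoreApplication("/tmp/kvstore-example")

	// Seed the validator set; Commit panics if InitChain was never called.
	app.InitChain(abci.RequestInitChain{Validators: kvstore.RandVals(1)})

	// Build a "val:pubkey!power" tx for one more validator and deliver it.
	v := kvstore.RandVal(1)
	tx := kvstore.MakeValSetChangeTx(v.PubKey, v.Power)
	if res := app.DeliverTx(abci.RequestDeliverTx{Tx: tx}); res.IsErr() {
		fmt.Println("rejected:", res.Log)
		return
	}
	app.Commit()
	fmt.Println("validators now:", len(app.Validators()))
}
```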
@@ -5,9 +5,9 @@ import (

 	"google.golang.org/grpc"

-	"github.com/tendermint/tendermint/abci/types"
 	tmnet "github.com/tendermint/tendermint/libs/net"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

 type GRPCServer struct {
@@ -18,11 +18,11 @@ type GRPCServer struct {
	listener net.Listener
	server *grpc.Server

-	app types.ABCIApplicationServer
+	app abci.ABCIApplicationServer
 }

 // NewGRPCServer returns a new gRPC ABCI server
-func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service {
+func NewGRPCServer(protoAddr string, app abci.ABCIApplicationServer) service.Service {
	proto, addr := tmnet.ProtocolAndAddress(protoAddr)
	s := &GRPCServer{
		proto: proto,
@@ -44,7 +44,7 @@ func (s *GRPCServer) OnStart() error {

	s.listener = ln
	s.server = grpc.NewServer()
-	types.RegisterABCIApplicationServer(s.server, s.app)
+	abci.RegisterABCIApplicationServer(s.server, s.app)

	s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr)
	go func() {
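On the other end of that registration, the generated gRPC client constructor moves with the package as well; the test helper earlier in this diff dials it the same way. A minimal sketch of a client-side Echo call against a running gRPC ABCI server (the address is an assumption):

```go
package main

import (
	"context"
	"fmt"
	"log"

	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	// Assumes a gRPC ABCI server (e.g. one built with NewGRPCServer above)
	// is already listening on this address.
	conn, err := grpc.Dial("127.0.0.1:26658", grpc.WithInsecure(), grpc.WithBlock())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := abci.NewABCIApplicationClient(conn)
	res, err := client.Echo(context.Background(), &abci.RequestEcho{Message: "ping"})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("echo:", res.Message)
}
```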
@@ -11,18 +11,18 @@ package server
 import (
 	"fmt"

-	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/libs/service"
+	"github.com/tendermint/tendermint/pkg/abci"
 )

-func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) {
+func NewServer(protoAddr, transport string, app abci.Application) (service.Service, error) {
	var s service.Service
	var err error
	switch transport {
	case "socket":
		s = NewSocketServer(protoAddr, app)
	case "grpc":
-		s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app))
+		s = NewGRPCServer(protoAddr, abci.NewGRPCApplication(app))
	default:
		err = fmt.Errorf("unknown server type %s", transport)
	}
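NewServer is the entry point most embedders touch, so for them the migration is usually just this one import swap plus the transport string. A minimal sketch of serving the example kvstore application over the socket transport:

```go
package main

import (
	"log"
	"os"
	"os/signal"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/abci/server"
)

func main() {
	app := kvstore.NewApplication()

	// "socket" selects NewSocketServer; "grpc" would wrap the app
	// with abci.NewGRPCApplication instead.
	srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", app)
	if err != nil {
		log.Fatal(err)
	}
	if err := srv.Start(); err != nil {
		log.Fatal(err)
	}
	defer srv.Stop() //nolint:errcheck

	// Block until interrupted so the server keeps serving connections.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
}
```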
@@ -8,11 +8,11 @@ import (
"os"
"runtime"

"github.com/tendermint/tendermint/abci/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
tmlog "github.com/tendermint/tendermint/libs/log"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/pkg/abci"
)

// var maxNumberConnections = 2
@@ -30,10 +30,10 @@ type SocketServer struct {
nextConnID int

appMtx tmsync.Mutex
app types.Application
app abci.Application
}

func NewSocketServer(protoAddr string, app types.Application) service.Service {
func NewSocketServer(protoAddr string, app abci.Application) service.Service {
proto, addr := tmnet.ProtocolAndAddress(protoAddr)
s := &SocketServer{
proto: proto,
@@ -120,8 +120,8 @@ func (s *SocketServer) acceptConnectionsRoutine() {

connID := s.addConn(conn)

closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *types.Response, 1000) // A channel to buffer responses
closeConn := make(chan error, 2) // Push to signal connection closed
responses := make(chan *abci.Response, 1000) // A channel to buffer responses

// Read requests from conn and deal with them
go s.handleRequests(closeConn, conn, responses)
@@ -152,7 +152,7 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) {
}

// Read requests from conn and deal with them
func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) {
func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *abci.Response) {
var count int
var bufReader = bufio.NewReader(conn)

@@ -174,8 +174,8 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp

for {

var req = &types.Request{}
err := types.ReadMessage(bufReader, req)
var req = &abci.Request{}
err := abci.ReadMessage(bufReader, req)
if err != nil {
if err == io.EOF {
closeConn <- err
@@ -191,65 +191,65 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp
}
}

func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) {
func (s *SocketServer) handleRequest(req *abci.Request, responses chan<- *abci.Response) {
switch r := req.Value.(type) {
case *types.Request_Echo:
responses <- types.ToResponseEcho(r.Echo.Message)
case *types.Request_Flush:
responses <- types.ToResponseFlush()
case *types.Request_Info:
case *abci.Request_Echo:
responses <- abci.ToResponseEcho(r.Echo.Message)
case *abci.Request_Flush:
responses <- abci.ToResponseFlush()
case *abci.Request_Info:
res := s.app.Info(*r.Info)
responses <- types.ToResponseInfo(res)
case *types.Request_DeliverTx:
responses <- abci.ToResponseInfo(res)
case *abci.Request_DeliverTx:
res := s.app.DeliverTx(*r.DeliverTx)
responses <- types.ToResponseDeliverTx(res)
case *types.Request_CheckTx:
responses <- abci.ToResponseDeliverTx(res)
case *abci.Request_CheckTx:
res := s.app.CheckTx(*r.CheckTx)
responses <- types.ToResponseCheckTx(res)
case *types.Request_Commit:
responses <- abci.ToResponseCheckTx(res)
case *abci.Request_Commit:
res := s.app.Commit()
responses <- types.ToResponseCommit(res)
case *types.Request_Query:
responses <- abci.ToResponseCommit(res)
case *abci.Request_Query:
res := s.app.Query(*r.Query)
responses <- types.ToResponseQuery(res)
case *types.Request_InitChain:
responses <- abci.ToResponseQuery(res)
case *abci.Request_InitChain:
res := s.app.InitChain(*r.InitChain)
responses <- types.ToResponseInitChain(res)
case *types.Request_BeginBlock:
responses <- abci.ToResponseInitChain(res)
case *abci.Request_BeginBlock:
res := s.app.BeginBlock(*r.BeginBlock)
responses <- types.ToResponseBeginBlock(res)
case *types.Request_EndBlock:
responses <- abci.ToResponseBeginBlock(res)
case *abci.Request_EndBlock:
res := s.app.EndBlock(*r.EndBlock)
responses <- types.ToResponseEndBlock(res)
case *types.Request_ListSnapshots:
responses <- abci.ToResponseEndBlock(res)
case *abci.Request_ListSnapshots:
res := s.app.ListSnapshots(*r.ListSnapshots)
responses <- types.ToResponseListSnapshots(res)
case *types.Request_OfferSnapshot:
responses <- abci.ToResponseListSnapshots(res)
case *abci.Request_OfferSnapshot:
res := s.app.OfferSnapshot(*r.OfferSnapshot)
responses <- types.ToResponseOfferSnapshot(res)
case *types.Request_LoadSnapshotChunk:
responses <- abci.ToResponseOfferSnapshot(res)
case *abci.Request_LoadSnapshotChunk:
res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk)
responses <- types.ToResponseLoadSnapshotChunk(res)
case *types.Request_ApplySnapshotChunk:
responses <- abci.ToResponseLoadSnapshotChunk(res)
case *abci.Request_ApplySnapshotChunk:
res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk)
responses <- types.ToResponseApplySnapshotChunk(res)
responses <- abci.ToResponseApplySnapshotChunk(res)
default:
responses <- types.ToResponseException("Unknown request")
responses <- abci.ToResponseException("Unknown request")
}
}

// Pull responses from 'responses' and write them to conn.
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) {
func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *abci.Response) {
var count int
var bufWriter = bufio.NewWriter(conn)
for {
var res = <-responses
err := types.WriteMessage(res, bufWriter)
err := abci.WriteMessage(res, bufWriter)
if err != nil {
closeConn <- fmt.Errorf("error writing message: %w", err)
return
}
if _, ok := res.Value.(*types.Response_Flush); ok {
if _, ok := res.Value.(*abci.Response_Flush); ok {
err = bufWriter.Flush()
if err != nil {
closeConn <- fmt.Errorf("error flushing write buffer: %w", err)

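Since the socket server pairs a reader goroutine with a writer goroutine over a buffered response channel, the natural smoke test is a socket client echoing through it. A hedged sketch, assuming the `abcicli` socket client keeps its pre-move constructor and that `pkg/abci` still exports `NewBaseApplication`:

```go
package main

import (
	"context"
	"fmt"

	abcicli "github.com/tendermint/tendermint/abci/client"
	server "github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
	addr := "tcp://127.0.0.1:26658" // illustrative address

	srv := server.NewSocketServer(addr, abci.NewBaseApplication())
	if err := srv.Start(); err != nil {
		panic(err)
	}
	defer srv.Stop() //nolint:errcheck

	cli := abcicli.NewSocketClient(addr, true /* mustConnect */)
	if err := cli.Start(); err != nil {
		panic(err)
	}
	defer cli.Stop() //nolint:errcheck

	// The echo travels handleRequests -> handleRequest -> handleResponses.
	res, err := cli.EchoSync(context.Background(), "ping")
	if err != nil {
		panic(err)
	}
	fmt.Println(res.Message) // "ping"
}
```
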
@@ -5,8 +5,8 @@ import (
"fmt"
"log"

"github.com/tendermint/tendermint/abci/types"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
@@ -20,8 +20,8 @@ func main() {
go func() {
counter := 0
for {
var res = &types.Response{}
err := types.ReadMessage(conn, res)
var res = &abci.Response{}
err := abci.ReadMessage(conn, res)
if err != nil {
log.Fatal(err.Error())
}
@@ -36,9 +36,9 @@ func main() {
counter := 0
for i := 0; ; i++ {
var bufWriter = bufio.NewWriter(conn)
var req = types.ToRequestEcho("foobar")
var req = abci.ToRequestEcho("foobar")

err := types.WriteMessage(req, bufWriter)
err := abci.WriteMessage(req, bufWriter)
if err != nil {
log.Fatal(err.Error())
}

@@ -7,8 +7,8 @@ import (
"log"
"reflect"

"github.com/tendermint/tendermint/abci/types"
tmnet "github.com/tendermint/tendermint/libs/net"
"github.com/tendermint/tendermint/pkg/abci"
)

func main() {
@@ -21,7 +21,7 @@ func main() {
// Make a bunch of requests
counter := 0
for i := 0; ; i++ {
req := types.ToRequestEcho("foobar")
req := abci.ToRequestEcho("foobar")
_, err := makeRequest(conn, req)
if err != nil {
log.Fatal(err.Error())
@@ -33,15 +33,15 @@ func main() {
}
}

func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error) {
func makeRequest(conn io.ReadWriter, req *abci.Request) (*abci.Response, error) {
var bufWriter = bufio.NewWriter(conn)

// Write desired request
err := types.WriteMessage(req, bufWriter)
err := abci.WriteMessage(req, bufWriter)
if err != nil {
return nil, err
}
err = types.WriteMessage(types.ToRequestFlush(), bufWriter)
err = abci.WriteMessage(abci.ToRequestFlush(), bufWriter)
if err != nil {
return nil, err
}
@@ -51,17 +51,17 @@ func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error
}

// Read desired response
var res = &types.Response{}
err = types.ReadMessage(conn, res)
var res = &abci.Response{}
err = abci.ReadMessage(conn, res)
if err != nil {
return nil, err
}
var resFlush = &types.Response{}
err = types.ReadMessage(conn, resFlush)
var resFlush = &abci.Response{}
err = abci.ReadMessage(conn, resFlush)
if err != nil {
return nil, err
}
if _, ok := resFlush.Value.(*types.Response_Flush); !ok {
if _, ok := resFlush.Value.(*abci.Response_Flush); !ok {
return nil, fmt.Errorf("expected flush response but got something else: %v", reflect.TypeOf(resFlush))
}

@@ -8,22 +8,22 @@ import (
mrand "math/rand"

abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/types"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/pkg/abci"
)

var ctx = context.Background()

func InitChain(client abcicli.Client) error {
total := 10
vals := make([]types.ValidatorUpdate, total)
vals := make([]abci.ValidatorUpdate, total)
for i := 0; i < total; i++ {
pubkey := tmrand.Bytes(33)
// nolint:gosec // G404: Use of weak random number generator
power := mrand.Int()
vals[i] = types.UpdateValidator(pubkey, int64(power), "")
vals[i] = abci.UpdateValidator(pubkey, int64(power), "")
}
_, err := client.InitChainSync(ctx, types.RequestInitChain{
_, err := client.InitChainSync(ctx, abci.RequestInitChain{
Validators: vals,
})
if err != nil {
@@ -52,7 +52,7 @@ func Commit(client abcicli.Client, hashExp []byte) error {
}

func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes})
res, _ := client.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: DeliverTx")
@@ -71,7 +71,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []
}

func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error {
res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes})
res, _ := client.CheckTxSync(ctx, abci.RequestCheckTx{Tx: txBytes})
code, data, log := res.Code, res.Data, res.Log
if code != codeExp {
fmt.Println("Failed test: CheckTx")

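These helpers take any `abcicli.Client`, so a driver can exercise a running app over either transport. A sketch of how they might be strung together from the same package — `abcicli.NewClient(addr, transport, mustConnect)` is assumed to keep its pre-move signature, and the address is illustrative:

```go
// Hypothetical driver living alongside the helpers above.
client, err := abcicli.NewClient("tcp://127.0.0.1:26658", "socket", true)
if err != nil {
	log.Fatal(err)
}
if err := client.Start(); err != nil {
	log.Fatal(err)
}
defer client.Stop() //nolint:errcheck

if err := InitChain(client); err != nil {
	log.Fatal(err)
}
// Expect code OK and no data echoed back for a fresh key=value tx.
if err := DeliverTx(client, []byte("key=value"), code.CodeTypeOK, nil); err != nil {
	log.Fatal(err)
}
if err := CheckTx(client, []byte("key=value"), code.CodeTypeOK, nil); err != nil {
	log.Fatal(err)
}
```
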
@@ -6,7 +6,7 @@ import (
"github.com/spf13/cobra"

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/p2p"
)

// GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded
@@ -18,7 +18,7 @@ var GenNodeKeyCmd = &cobra.Command{
}

func genNodeKey(cmd *cobra.Command, args []string) error {
nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()

bz, err := tmjson.Marshal(nodeKey)
if err != nil {

@@ -6,8 +6,8 @@ import (
"github.com/spf13/cobra"

tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

// GenValidatorCmd allows the generation of a keypair for a
@@ -19,7 +19,7 @@ var GenValidatorCmd = &cobra.Command{
}

func init() {
GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
GenValidatorCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

@@ -11,8 +11,9 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/p2p"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

// InitFilesCmd initializes a fresh Tendermint Core instance.
@@ -30,7 +31,7 @@ var (
)

func init() {
InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
InitFilesCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

@@ -75,7 +76,7 @@ func initFilesWithConfig(config *cfg.Config) error {
if tmos.FileExists(nodeKeyFile) {
logger.Info("Found node key", "path", nodeKeyFile)
} else {
if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil {
if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil {
return err
}
logger.Info("Generated node key", "path", nodeKeyFile)
@@ -87,14 +88,14 @@ func initFilesWithConfig(config *cfg.Config) error {
logger.Info("Found genesis file", "path", genFile)
} else {

genDoc := types.GenesisDoc{
genDoc := consensus.GenesisDoc{
ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
GenesisTime: tmtime.Now(),
ConsensusParams: types.DefaultConsensusParams(),
ConsensusParams: consensus.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = types.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
genDoc.ConsensusParams.Validator = consensus.ValidatorParams{
PubKeyTypes: []string{consensus.ABCIPubKeyTypeSecp256k1},
}
}

@@ -107,7 +108,7 @@ func initFilesWithConfig(config *cfg.Config) error {
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
genDoc.Validators = []types.GenesisValidator{{
genDoc.Validators = []consensus.GenesisValidator{{
Address: pubKey.Address(),
PubKey: pubKey,
Power: 10,

@@ -8,16 +8,16 @@ import (
"github.com/spf13/cobra"
tmdb "github.com/tendermint/tm-db"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/progressbar"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/events"
ctypes "github.com/tendermint/tendermint/rpc/core/types"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
"github.com/tendermint/tendermint/state/indexer/sink/kv"
"github.com/tendermint/tendermint/state/indexer/sink/psql"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

const (
@@ -170,7 +170,7 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor
return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i)
}

e := types.EventDataNewBlockHeader{
e := events.EventDataNewBlockHeader{
Header: b.Header,
NumTxs: int64(len(b.Txs)),
ResultBeginBlock: *r.BeginBlock,
@@ -182,7 +182,7 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor
batch = indexer.NewBatch(e.NumTxs)

for i, tx := range b.Data.Txs {
tr := abcitypes.TxResult{
tr := abci.TxResult{
Height: b.Height,
Index: uint32(i),
Tx: tx,

@@ -9,12 +9,14 @@ import (
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"

abcitypes "github.com/tendermint/tendermint/abci/types"
tmcfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/mempool"
prototmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/state/indexer"
evmocks "github.com/tendermint/tendermint/state/indexer/mocks"
"github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)

const (
@@ -116,27 +118,27 @@ func TestLoadBlockStore(t *testing.T) {
func TestReIndexEvent(t *testing.T) {
mockBlockStore := &mocks.BlockStore{}
mockStateStore := &mocks.Store{}
mockEventSink := &mocks.EventSink{}
mockEventSink := &evmocks.EventSink{}

mockBlockStore.
On("Base").Return(base).
On("Height").Return(height).
On("LoadBlock", base).Return(nil).Once().
On("LoadBlock", base).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}}).
On("LoadBlock", height).Return(&types.Block{Data: types.Data{Txs: types.Txs{make(types.Tx, 1)}}})
On("LoadBlock", base).Return(&block.Block{Data: block.Data{Txs: mempool.Txs{make(mempool.Tx, 1)}}}).
On("LoadBlock", height).Return(&block.Block{Data: block.Data{Txs: mempool.Txs{make(mempool.Tx, 1)}}})

mockEventSink.
On("Type").Return(indexer.KV).
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("types.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*types.TxResult")).Return(nil)
On("IndexBlockEvents", mock.AnythingOfType("events.EventDataNewBlockHeader")).Return(errors.New("")).Once().
On("IndexBlockEvents", mock.AnythingOfType("events.EventDataNewBlockHeader")).Return(nil).
On("IndexTxEvents", mock.AnythingOfType("[]*abci.TxResult")).Return(errors.New("")).Once().
On("IndexTxEvents", mock.AnythingOfType("[]*abci.TxResult")).Return(nil)

dtx := abcitypes.ResponseDeliverTx{}
dtx := abci.ResponseDeliverTx{}
abciResp := &prototmstate.ABCIResponses{
DeliverTxs: []*abcitypes.ResponseDeliverTx{&dtx},
EndBlock: &abcitypes.ResponseEndBlock{},
BeginBlock: &abcitypes.ResponseBeginBlock{},
DeliverTxs: []*abci.ResponseDeliverTx{&dtx},
EndBlock: &abci.ResponseEndBlock{},
BeginBlock: &abci.ResponseBeginBlock{},
}

mockStateStore.

@@ -7,8 +7,8 @@ import (

"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

// ResetAllCmd removes the database of this Tendermint core
@@ -23,7 +23,7 @@ var keepAddrBook bool

func init() {
ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact")
ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

@@ -15,8 +15,8 @@ import (
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/types"
)

var (
@@ -74,7 +74,7 @@ func init() {
"P2P Port")
TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false,
"randomize the moniker for each generated node")
TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519,
TestnetFilesCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519,
"Key type to generate privval file with. Options: ed25519, secp256k1")
}

@@ -121,7 +121,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}
}

genVals := make([]types.GenesisValidator, nValidators)
genVals := make([]consensus.GenesisValidator, nValidators)

for i := 0; i < nValidators; i++ {
nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i)
@@ -157,7 +157,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
if err != nil {
return fmt.Errorf("can't get pubkey: %w", err)
}
genVals[i] = types.GenesisValidator{
genVals[i] = consensus.GenesisValidator{
Address: pubKey.Address(),
PubKey: pubKey,
Power: 1,
@@ -187,16 +187,16 @@ func testnetFiles(cmd *cobra.Command, args []string) error {
}

// Generate genesis doc from generated validators
genDoc := &types.GenesisDoc{
genDoc := &consensus.GenesisDoc{
ChainID: "chain-" + tmrand.Str(6),
GenesisTime: tmtime.Now(),
InitialHeight: initialHeight,
Validators: genVals,
ConsensusParams: types.DefaultConsensusParams(),
ConsensusParams: consensus.DefaultConsensusParams(),
}
if keyType == "secp256k1" {
genDoc.ConsensusParams.Validator = types.ValidatorParams{
PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1},
genDoc.ConsensusParams.Validator = consensus.ValidatorParams{
PubKeyTypes: []string{consensus.ABCIPubKeyTypeSecp256k1},
}
}

@@ -13,7 +13,7 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/p2p"
)

const (
@@ -288,23 +288,23 @@ func (cfg BaseConfig) NodeKeyFile() string {
}

// LoadNodeKeyID loads the NodeKey from cfg.NodeKeyFile() and returns its ID.
func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) {
func (cfg BaseConfig) LoadNodeKeyID() (p2p.NodeID, error) {
jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile())
if err != nil {
return "", err
}
nodeKey := types.NodeKey{}
nodeKey := p2p.NodeKey{}
err = tmjson.Unmarshal(jsonBytes, &nodeKey)
if err != nil {
return "", err
}
nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey())
nodeKey.ID = p2p.NodeIDFromPubKey(nodeKey.PubKey())
return nodeKey.ID, nil
}

// LoadOrGenNodeKeyID attempts to load the NodeKey from cfg.NodeKeyFile(). If
// the file does not exist, it generates and saves a new NodeKey, returning its ID.
func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
func (cfg BaseConfig) LoadOrGenNodeKeyID() (p2p.NodeID, error) {
if tmos.FileExists(cfg.NodeKeyFile()) {
nodeKey, err := cfg.LoadNodeKeyID()
if err != nil {
@@ -313,7 +313,7 @@ func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) {
return nodeKey, nil
}

nodeKey := types.GenNodeKey()
nodeKey := p2p.GenNodeKey()

if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil {
return "", err

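A short sketch of how a caller might resolve its node ID through this config API after the `types` → `pkg/p2p` move — the home directory is illustrative:

```go
package main

import (
	"fmt"
	"log"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	// Illustrative home directory; normally this comes from --home.
	conf := cfg.DefaultConfig()
	conf.SetRoot("/tmp/tendermint-home")

	// Loads the key if present, otherwise generates and saves one;
	// either way the derived p2p.NodeID comes back.
	nodeID, err := conf.LoadOrGenNodeKeyID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("node ID:", nodeID)
}
```
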
@@ -87,7 +87,7 @@ Create a file called `app.go` with the following content:
package main

import (
abcitypes "github.com/tendermint/tendermint/abci/types"
abcitypes "github.com/tendermint/tendermint/pkg/abci"
)

type KVStoreApplication struct {}
@@ -346,7 +346,7 @@ import (
"github.com/dgraph-io/badger"
"github.com/spf13/viper"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/pkg/abci"
cfg "github.com/tendermint/tendermint/config"
tmflags "github.com/tendermint/tendermint/libs/cli/flags"
"github.com/tendermint/tendermint/libs/log"

@@ -90,7 +90,7 @@ Create a file called `app.go` with the following content:
package main

import (
abcitypes "github.com/tendermint/tendermint/abci/types"
abcitypes "github.com/tendermint/tendermint/pkg/abci"
)

type KVStoreApplication struct {}

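For tutorial readers following along, only the import path changes; the application skeleton itself stays the same. A minimal sketch under the new path, assuming `pkg/abci` still exports `BaseApplication` for embedding:

```go
package main

import (
	abcitypes "github.com/tendermint/tendermint/pkg/abci"
)

// KVStoreApplication embeds BaseApplication so it only has to
// override the ABCI methods it actually cares about.
type KVStoreApplication struct {
	abcitypes.BaseApplication
}

func NewKVStoreApplication() *KVStoreApplication {
	return &KVStoreApplication{}
}

// CheckTx accepts everything in this sketch; a real application
// would validate the tx bytes here.
func (app *KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	return abcitypes.ResponseCheckTx{Code: 0}
}
```
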
@@ -1,12 +1,12 @@
package blocksync

import (
"github.com/tendermint/tendermint/pkg/metadata"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/types"
)

const (
MaxMsgSize = types.MaxBlockSizeBytes +
MaxMsgSize = metadata.MaxBlockSizeBytes +
bcproto.BlockResponseMessagePrefixSize +
bcproto.BlockResponseMessageFieldKeySize
)

@@ -11,7 +11,8 @@ import (
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/p2p"
)

/*
@@ -62,7 +63,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests
// PeerID responsible for delivering the block.
type BlockRequest struct {
Height int64
PeerID types.NodeID
PeerID p2p.NodeID
}

// BlockPool keeps track of the block sync peers, block requests and block responses.
@@ -75,7 +76,7 @@ type BlockPool struct {
requesters map[int64]*bpRequester
height int64 // the lowest key in requesters.
// peers
peers map[types.NodeID]*bpPeer
peers map[p2p.NodeID]*bpPeer
maxPeerHeight int64 // the biggest reported height

// atomic
@@ -93,7 +94,7 @@ type BlockPool struct {
// requests and errors will be sent to requestsCh and errorsCh accordingly.
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool {
bp := &BlockPool{
peers: make(map[types.NodeID]*bpPeer),
peers: make(map[p2p.NodeID]*bpPeer),

requesters: make(map[int64]*bpRequester),
height: start,
@@ -197,7 +198,7 @@ func (pool *BlockPool) IsCaughtUp() bool {
// We need to see the second block's Commit to validate the first block.
// So we peek two blocks at a time.
// The caller will verify the commit.
func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) {
func (pool *BlockPool) PeekTwoBlocks() (first *block.Block, second *block.Block) {
pool.mtx.RLock()
defer pool.mtx.RUnlock()

@@ -244,13 +245,13 @@ func (pool *BlockPool) PopRequest() {
// RedoRequest invalidates the block at pool.height,
// Remove the peer and redo request from others.
// Returns the ID of the removed peer.
func (pool *BlockPool) RedoRequest(height int64) types.NodeID {
func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID {
pool.mtx.Lock()
defer pool.mtx.Unlock()

request := pool.requesters[height]
peerID := request.getPeerID()
if peerID != types.NodeID("") {
if peerID != p2p.NodeID("") {
// RemovePeer will redo all requesters associated with this peer.
pool.removePeer(peerID)
}
@@ -259,7 +260,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID {

// AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it.
// TODO: ensure that blocks come in order for each peer.
func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) {
func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *block.Block, blockSize int) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -306,7 +307,7 @@ func (pool *BlockPool) LastAdvance() time.Time {
}

// SetPeerRange sets the peer's alleged blockchain base and height.
func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) {
func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

@@ -327,14 +328,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6

// RemovePeer removes the peer with peerID from the pool. If there's no peer
// with peerID, function is a no-op.
func (pool *BlockPool) RemovePeer(peerID types.NodeID) {
func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) {
pool.mtx.Lock()
defer pool.mtx.Unlock()

pool.removePeer(peerID)
}

func (pool *BlockPool) removePeer(peerID types.NodeID) {
func (pool *BlockPool) removePeer(peerID p2p.NodeID) {
for _, requester := range pool.requesters {
if requester.getPeerID() == peerID {
requester.redo(peerID)
@@ -415,14 +416,14 @@ func (pool *BlockPool) requestersLen() int64 {
return int64(len(pool.requesters))
}

func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) {
func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
pool.requestsCh <- BlockRequest{height, peerID}
}

func (pool *BlockPool) sendError(err error, peerID types.NodeID) {
func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) {
if !pool.IsRunning() {
return
}
@@ -470,7 +471,7 @@ type bpPeer struct {
height int64
base int64
pool *BlockPool
id types.NodeID
id p2p.NodeID
recvMonitor *flow.Monitor

timeout *time.Timer
@@ -478,7 +479,7 @@ type bpPeer struct {
logger log.Logger
}

func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer {
func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer {
peer := &bpPeer{
pool: pool,
id: peerID,
@@ -543,11 +544,11 @@ type bpRequester struct {
pool *BlockPool
height int64
gotBlockCh chan struct{}
redoCh chan types.NodeID // redo may be sent multiple times; the peerID identifies repeats
redoCh chan p2p.NodeID // redo may be sent multiple times; the peerID identifies repeats

mtx tmsync.Mutex
peerID types.NodeID
block *types.Block
peerID p2p.NodeID
block *block.Block
}

func newBPRequester(pool *BlockPool, height int64) *bpRequester {
@@ -555,7 +556,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester {
pool: pool,
height: height,
gotBlockCh: make(chan struct{}, 1),
redoCh: make(chan types.NodeID, 1),
redoCh: make(chan p2p.NodeID, 1),

peerID: "",
block: nil,
@@ -570,7 +571,7 @@ func (bpr *bpRequester) OnStart() error {
}

// Returns true if the peer matches and block doesn't already exist.
func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
func (bpr *bpRequester) setBlock(block *block.Block, peerID p2p.NodeID) bool {
bpr.mtx.Lock()
if bpr.block != nil || bpr.peerID != peerID {
bpr.mtx.Unlock()
@@ -586,13 +587,13 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool {
return true
}

func (bpr *bpRequester) getBlock() *types.Block {
func (bpr *bpRequester) getBlock() *block.Block {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.block
}

func (bpr *bpRequester) getPeerID() types.NodeID {
func (bpr *bpRequester) getPeerID() p2p.NodeID {
bpr.mtx.Lock()
defer bpr.mtx.Unlock()
return bpr.peerID
@@ -614,7 +615,7 @@ func (bpr *bpRequester) reset() {
// Tells bpRequester to pick another peer and try again.
// NOTE: Nonblocking, and does nothing if another redo
// was already requested.
func (bpr *bpRequester) redo(peerID types.NodeID) {
func (bpr *bpRequester) redo(peerID p2p.NodeID) {
select {
case bpr.redoCh <- peerID:
default:

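An in-package sketch of how a caller drives the pool after the NodeID move — channel sizes and the peer data are illustrative, and `peerError` is the unexported type defined alongside the reactor:

```go
requestsCh := make(chan BlockRequest, 64)
errorsCh := make(chan peerError, 16)

pool := NewBlockPool(1, requestsCh, errorsCh)
if err := pool.Start(); err != nil {
	panic(err)
}
defer pool.Stop() //nolint:errcheck

// Advertise a (hypothetical) peer's available range; the pool will
// emit BlockRequests for the heights it wants from that peer.
pool.SetPeerRange(p2p.NodeID("peer-1"), 1, 100)

go func() {
	for req := range requestsCh {
		// A real caller sends req to req.PeerID over the network;
		// here we hand back a stub block immediately.
		blk := &block.Block{Header: metadata.Header{Height: req.Height}}
		pool.AddBlock(req.PeerID, blk, 123)
	}
}()
```
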
@@ -11,7 +11,9 @@ import (

"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
)

func init() {
@@ -19,7 +21,7 @@ func init() {
}

type testPeer struct {
id types.NodeID
id p2p.NodeID
base int64
height int64
inputChan chan inputData // make sure each peer's data is sequential
@@ -41,7 +43,7 @@ func (p testPeer) runInputRoutine() {

// Request desired, pretend like we got the block immediately.
func (p testPeer) simulateInput(input inputData) {
block := &types.Block{Header: types.Header{Height: input.request.Height}}
block := &block.Block{Header: metadata.Header{Height: input.request.Height}}
input.pool.AddBlock(input.request.PeerID, block, 123)
// TODO: uncommenting this creates a race which is detected by:
// https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856
@@ -49,7 +51,7 @@ func (p testPeer) simulateInput(input inputData) {
// input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height)
}

type testPeers map[types.NodeID]testPeer
type testPeers map[p2p.NodeID]testPeer

func (ps testPeers) start() {
for _, v := range ps {
@@ -66,7 +68,7 @@ func (ps testPeers) stop() {
func makePeers(numPeers int, minHeight, maxHeight int64) testPeers {
peers := make(testPeers, numPeers)
for i := 0; i < numPeers; i++ {
peerID := types.NodeID(tmrand.Str(12))
peerID := p2p.NodeID(tmrand.Str(12))
height := minHeight + mrand.Int63n(maxHeight-minHeight)
base := minHeight + int64(i)
if base > height {
@@ -182,7 +184,7 @@ func TestBlockPoolTimeout(t *testing.T) {

// Pull from channels
counter := 0
timedOut := map[types.NodeID]struct{}{}
timedOut := map[p2p.NodeID]struct{}{}
for {
select {
case err := <-errorsCh:
@@ -203,7 +205,7 @@ func TestBlockPoolTimeout(t *testing.T) {
func TestBlockPoolRemovePeer(t *testing.T) {
peers := make(testPeers, 10)
for i := 0; i < 10; i++ {
peerID := types.NodeID(fmt.Sprintf("%d", i+1))
peerID := p2p.NodeID(fmt.Sprintf("%d", i+1))
height := int64(i + 1)
peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)}
}
@@ -227,10 +229,10 @@ func TestBlockPoolRemovePeer(t *testing.T) {
assert.EqualValues(t, 10, pool.MaxPeerHeight())

// remove not-existing peer
assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) })
assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) })

// remove peer with biggest height
pool.RemovePeer(types.NodeID("10"))
pool.RemovePeer(p2p.NodeID("10"))
assert.EqualValues(t, 9, pool.MaxPeerHeight())

// remove all peers

@@ -12,10 +12,12 @@ import (
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
tmSync "github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

var (
@@ -67,7 +69,7 @@ type consensusReactor interface {

type peerError struct {
err error
peerID types.NodeID
peerID p2ptypes.NodeID
}

func (e peerError) Error() string {
@@ -205,7 +207,7 @@ func (r *Reactor) OnStop() {

// respondToPeer loads a block and sends it to the requesting peer, if we have it.
// Otherwise, we'll respond saying we do not have it.
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) {
func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2ptypes.NodeID) {
block := r.store.LoadBlock(msg.Height)
if block != nil {
blockProto, err := block.ToProto()
@@ -240,7 +242,7 @@ func (r *Reactor) handleBlockSyncMessage(envelope p2p.Envelope) error {
r.respondToPeer(msg, envelope.From)

case *bcproto.BlockResponse:
block, err := types.BlockFromProto(msg.Block)
block, err := block.BlockFromProto(msg.Block)
if err != nil {
logger.Error("failed to convert block from proto", "err", err)
return err
@@ -531,9 +533,9 @@ FOR_LOOP:
}

var (
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
firstParts = first.MakePartSet(metadata.BlockPartSizeBytes)
firstPartSetHeader = firstParts.Header()
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
firstID = metadata.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader}
)

// Finally, verify the first block using the second's commit.

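For context, the step this last hunk feeds into verifies `first` with `second`'s LastCommit. A hedged sketch of that check, assuming the validator-set API keeps the `VerifyCommitLight` signature shown elsewhere in this changeset:

```go
// first and second come from pool.PeekTwoBlocks(); firstID was built above.
if err := state.Validators.VerifyCommitLight(
	chainID, firstID, first.Height, second.LastCommit,
); err != nil {
	// Bad block or commit: invalidate the request and pick new peers.
	peerID := r.pool.RedoRequest(first.Height)
	_ = peerID // a real reactor reports the misbehaving peer here
}
```
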
@@ -7,7 +7,6 @@ import (

"github.com/stretchr/testify/require"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cons "github.com/tendermint/tendermint/internal/consensus"
"github.com/tendermint/tendermint/internal/mempool/mock"
@@ -15,34 +14,37 @@ import (
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)

type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
nodes []types.NodeID
nodes []p2ptypes.NodeID

reactors map[types.NodeID]*Reactor
app map[types.NodeID]proxy.AppConns
reactors map[p2ptypes.NodeID]*Reactor
app map[p2ptypes.NodeID]proxy.AppConns

blockSyncChannels map[types.NodeID]*p2p.Channel
peerChans map[types.NodeID]chan p2p.PeerUpdate
peerUpdates map[types.NodeID]*p2p.PeerUpdates
blockSyncChannels map[p2ptypes.NodeID]*p2p.Channel
peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate
peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates

blockSync bool
}

func setup(
t *testing.T,
genDoc *types.GenesisDoc,
privVal types.PrivValidator,
genDoc *consensus.GenesisDoc,
privVal consensus.PrivValidator,
maxBlockHeights []int64,
chBuf uint,
) *reactorTestSuite {
@@ -53,14 +55,14 @@ func setup(
"must specify at least one block height (nodes)")

rts := &reactorTestSuite{
logger: log.TestingLogger().With("module", "block_sync", "testCase", t.Name()),
logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
nodes: make([]types.NodeID, 0, numNodes),
reactors: make(map[types.NodeID]*Reactor, numNodes),
app: make(map[types.NodeID]proxy.AppConns, numNodes),
blockSyncChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
nodes: make([]p2ptypes.NodeID, 0, numNodes),
reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes),
app: make(map[p2ptypes.NodeID]proxy.AppConns, numNodes),
blockSyncChannels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes),
peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes),
peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes),
blockSync: true,
}

@@ -89,9 +91,9 @@ func setup(
}

func (rts *reactorTestSuite) addNode(t *testing.T,
nodeID types.NodeID,
genDoc *types.GenesisDoc,
privVal types.PrivValidator,
nodeID p2ptypes.NodeID,
genDoc *consensus.GenesisDoc,
privVal consensus.PrivValidator,
maxBlockHeight int64,
) {
t.Helper()
@@ -119,7 +121,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
)

for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
lastCommit := metadata.NewCommit(blockHeight-1, 0, metadata.BlockID{}, nil)

if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
@@ -134,17 +136,17 @@ func (rts *reactorTestSuite) addNode(t *testing.T,
)
require.NoError(t, err)

lastCommit = types.NewCommit(
lastCommit = metadata.NewCommit(
vote.Height,
vote.Round,
lastBlockMeta.BlockID,
[]types.CommitSig{vote.CommitSig()},
[]metadata.CommitSig{vote.CommitSig()},
)
}

thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)
thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
thisParts := thisBlock.MakePartSet(metadata.BlockPartSizeBytes)
blockID := metadata.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)

@@ -1,12 +1,12 @@
package behavior

import "github.com/tendermint/tendermint/types"
import "github.com/tendermint/tendermint/pkg/p2p"

// PeerBehavior is a struct describing a behavior a peer performed.
// `peerID` identifies the peer and reason characterizes the specific
// behavior performed by the peer.
type PeerBehavior struct {
peerID types.NodeID
peerID p2p.NodeID
reason interface{}
}

@@ -15,7 +15,7 @@ type badMessage struct {
}

// BadMessage returns a badMessage PeerBehavior.
func BadMessage(peerID types.NodeID, explanation string) PeerBehavior {
func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: badMessage{explanation}}
}

@@ -24,7 +24,7 @@ type messageOutOfOrder struct {
}

// MessageOutOfOrder returns a messageOutOfOrder PeerBehavior.
func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior {
func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}}
}

@@ -33,7 +33,7 @@ type consensusVote struct {
}

// ConsensusVote returns a consensusVote PeerBehavior.
func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior {
func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}}
}

@@ -42,6 +42,6 @@ type blockPart struct {
}

// BlockPart returns blockPart PeerBehavior.
func BlockPart(peerID types.NodeID, explanation string) PeerBehavior {
func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior {
return PeerBehavior{peerID: peerID, reason: blockPart{explanation}}
}

@@ -5,7 +5,7 @@ import (

tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

// Reporter provides an interface for reactors to report the behavior
@@ -52,14 +52,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error {
// behavior in manufactured scenarios.
type MockReporter struct {
mtx tmsync.RWMutex
pb map[types.NodeID][]PeerBehavior
pb map[p2ptypes.NodeID][]PeerBehavior
}

// NewMockReporter returns a Reporter which records all reported
// behaviors in memory.
func NewMockReporter() *MockReporter {
return &MockReporter{
pb: map[types.NodeID][]PeerBehavior{},
pb: map[p2ptypes.NodeID][]PeerBehavior{},
}
}

@@ -73,7 +73,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error {
}

// GetBehaviors returns all behaviors reported on the peer identified by peerID.
func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior {
func (mpbr *MockReporter) GetBehaviors(peerID p2ptypes.NodeID) []PeerBehavior {
mpbr.mtx.RLock()
defer mpbr.mtx.RUnlock()
if items, ok := mpbr.pb[peerID]; ok {

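A quick sketch of the mock in use with the new `p2p.NodeID` type, written the way the tests below import the package (aliased as `bh`) — the peer ID and reason strings are illustrative:

```go
reporter := bh.NewMockReporter()
peerID := p2p.NodeID("peer-1") // illustrative ID

// Record a behavior, then read it back for assertions.
if err := reporter.Report(bh.BadMessage(peerID, "malformed vote")); err != nil {
	panic(err)
}
reported := reporter.GetBehaviors(peerID)
fmt.Printf("%d behavior(s) recorded\n", len(reported)) // 1
```
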
@@ -5,13 +5,13 @@ import (
"testing"

bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/p2p"
)

// TestMockReporter tests the MockReporter's ability to store reported
// peer behavior in memory indexed by the peerID.
func TestMockReporter(t *testing.T) {
var peerID types.NodeID = "MockPeer"
var peerID p2p.NodeID = "MockPeer"
pr := bh.NewMockReporter()

behaviors := pr.GetBehaviors(peerID)
@@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) {
}

type scriptItem struct {
peerID types.NodeID
peerID p2p.NodeID
behavior bh.PeerBehavior
}

@@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool {
// frequencies with which those behaviors occur.
func TestEqualPeerBehaviors(t *testing.T) {
var (
peerID types.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
peerID p2p.NodeID = "MockPeer"
consensusVote = bh.ConsensusVote(peerID, "voted")
blockPart = bh.BlockPart(peerID, "blocked")
equals = []struct {
left []bh.PeerBehavior
right []bh.PeerBehavior
}{
@@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) {
func TestMockPeerBehaviorReporterConcurrency(t *testing.T) {
var (
behaviorScript = []struct {
peerID types.NodeID
peerID p2p.NodeID
behaviors []bh.PeerBehavior
}{
{"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}},

@@ -5,9 +5,9 @@ import (

"github.com/gogo/protobuf/proto"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/pkg/block"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

var (
@@ -16,7 +16,7 @@ var (

type iIO interface {
sendBlockRequest(peer p2p.Peer, height int64) error
sendBlockToPeer(block *types.Block, peer p2p.Peer) error
sendBlockToPeer(block *block.Block, peer p2p.Peer) error
sendBlockNotFound(height int64, peer p2p.Peer) error
sendStatusResponse(base, height int64, peer p2p.Peer) error

@@ -90,7 +90,7 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer)
return nil
}

func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error {
func (sio *switchIO) sendBlockToPeer(block *block.Block, peer p2p.Peer) error {
if block == nil {
panic("trying to send nil block")
}

@@ -3,8 +3,10 @@ package v2
import (
"fmt"

"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

// Events generated by the processor:
@@ -12,8 +14,8 @@ import (
type pcBlockVerificationFailure struct {
priorityNormal
height int64
firstPeerID types.NodeID
secondPeerID types.NodeID
firstPeerID p2p.NodeID
secondPeerID p2p.NodeID
}

func (e pcBlockVerificationFailure) String() string {
@@ -25,7 +27,7 @@ func (e pcBlockVerificationFailure) String() string {
type pcBlockProcessed struct {
priorityNormal
height int64
peerID types.NodeID
peerID p2p.NodeID
}

func (e pcBlockProcessed) String() string {
@@ -44,8 +46,8 @@ func (p pcFinished) Error() string {
}

type queueItem struct {
block *types.Block
peerID types.NodeID
block *block.Block
peerID p2p.NodeID
}

type blockQueue map[int64]queueItem
@@ -94,7 +96,7 @@ func (state *pcState) synced() bool {
return len(state.queue) <= 1
}

func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) {
func (state *pcState) enqueue(peerID p2p.NodeID, block *block.Block, height int64) {
if item, ok := state.queue[height]; ok {
panic(fmt.Sprintf(
"duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)",
@@ -109,7 +111,7 @@ func (state *pcState) height() int64 {
}

// purgePeer moves all unprocessed blocks from the queue
func (state *pcState) purgePeer(peerID types.NodeID) {
func (state *pcState) purgePeer(peerID p2p.NodeID) {
// what if height is less than state.height?
for height, item := range state.queue {
if item.peerID == peerID {
@@ -159,8 +161,8 @@ func (state *pcState) handle(event Event) (Event, error) {

var (
first, second = firstItem.block, secondItem.block
firstParts = first.MakePartSet(types.BlockPartSizeBytes)
firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
firstParts = first.MakePartSet(metadata.BlockPartSizeBytes)
firstID = metadata.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()}
)

// verify if +second+ last commit "confirms" +first+ block

@@ -4,17 +4,18 @@ import (
|
||||
"fmt"
|
||||
|
||||
cons "github.com/tendermint/tendermint/internal/consensus"
|
||||
"github.com/tendermint/tendermint/pkg/block"
|
||||
"github.com/tendermint/tendermint/pkg/metadata"
|
||||
"github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
type processorContext interface {
|
||||
applyBlock(blockID types.BlockID, block *types.Block) error
|
||||
verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error
|
||||
saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
applyBlock(blockID metadata.BlockID, block *block.Block) error
verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error
saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit)
tmState() state.State
setState(state.State)
recordConsMetrics(block *types.Block)
recordConsMetrics(block *block.Block)
}

type pContext struct {
@@ -33,7 +34,7 @@ func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons.
}
}

func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error {
func (pc *pContext) applyBlock(blockID metadata.BlockID, block *block.Block) error {
newState, err := pc.applier.ApplyBlock(pc.state, blockID, block)
pc.state = newState
return err
@@ -47,15 +48,15 @@ func (pc *pContext) setState(state state.State) {
pc.state = state
}

func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
func (pc pContext) verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error {
return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit)
}

func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
func (pc *pContext) saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) {
pc.store.SaveBlock(block, blockParts, seenCommit)
}

func (pc *pContext) recordConsMetrics(block *types.Block) {
func (pc *pContext) recordConsMetrics(block *block.Block) {
pc.metrics.RecordConsMetrics(block)
}

@@ -76,7 +77,7 @@ func newMockProcessorContext(
}
}

func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error {
func (mpc *mockPContext) applyBlock(blockID metadata.BlockID, block *block.Block) error {
for _, h := range mpc.applicationBL {
if h == block.Height {
return fmt.Errorf("generic application error")
@@ -86,7 +87,7 @@ func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) e
return nil
}

func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error {
func (mpc *mockPContext) verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error {
for _, h := range mpc.verificationBL {
if h == height {
return fmt.Errorf("generic verification error")
@@ -95,7 +96,7 @@ func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, hei
return nil
}

func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
func (mpc *mockPContext) saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) {

}

@@ -107,6 +108,6 @@ func (mpc *mockPContext) tmState() state.State {
return mpc.state
}

func (mpc *mockPContext) recordConsMetrics(block *types.Block) {
func (mpc *mockPContext) recordConsMetrics(block *block.Block) {

}
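The hunks above move the processor context from types.Block, types.BlockID, and types.Commit to the new pkg/block and pkg/metadata packages. A minimal sketch, not part of this commit, of a stub satisfying the block-handling subset of that method set, with the package paths assumed from the diff itself:

// Illustrative stub only; tmState/setState/recordConsMetrics omitted.
package processor

import (
	"github.com/tendermint/tendermint/pkg/block"
	"github.com/tendermint/tendermint/pkg/metadata"
)

// nopContext accepts every block and every commit; useful purely as a
// shape reference for the updated interface above.
type nopContext struct{}

func (nopContext) applyBlock(_ metadata.BlockID, _ *block.Block) error { return nil }

func (nopContext) verifyCommit(_ string, _ metadata.BlockID, _ int64, _ *metadata.Commit) error {
	return nil
}

func (nopContext) saveBlock(_ *block.Block, _ *metadata.PartSet, _ *metadata.Commit) {}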
@@ -5,8 +5,10 @@ import (

"github.com/stretchr/testify/assert"

"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmState "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

// pcBlock is a test helper structure with simple types. Its purpose is to help with test readability.
@@ -26,8 +28,8 @@ type params struct {
}

// makePcBlock makes an empty block.
func makePcBlock(height int64) *types.Block {
return &types.Block{Header: types.Header{Height: height}}
func makePcBlock(height int64) *block.Block {
return &block.Block{Header: metadata.Header{Height: height}}
}

// makeState takes test parameters and creates a specific processor state.
@@ -39,7 +41,7 @@ func makeState(p *params) *pcState {
state := newPcState(context)

for _, item := range p.items {
state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height)
state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height)
}

state.blocksSynced = p.blocksSynced
@@ -47,7 +49,7 @@ func makeState(p *params) *pcState {
return state
}

func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived {
func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived {
return scBlockReceived{
peerID: peerID,
block: makePcBlock(height),
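After this change the test helpers build blocks from pkg/block with headers from pkg/metadata, and key peers by the string-backed p2p.NodeID. A hedged sketch of the same construction pattern, standalone, assuming only the package layout shown in the diff:

import (
	"github.com/tendermint/tendermint/pkg/block"
	"github.com/tendermint/tendermint/pkg/metadata"
	"github.com/tendermint/tendermint/pkg/p2p"
)

// emptyBlockAt mirrors makePcBlock: an otherwise-empty block whose only
// meaningful field is the height.
func emptyBlockAt(height int64) *block.Block {
	return &block.Block{Header: metadata.Header{Height: height}}
}

// Peer IDs stay convertible from plain strings, so migrating fixtures
// is a cast from types.NodeID to p2p.NodeID.
var examplePeer = p2p.NodeID("P1")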
@@ -14,9 +14,11 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/sync"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

const (
@@ -25,8 +27,8 @@ const (
)

type blockStore interface {
LoadBlock(height int64) *types.Block
SaveBlock(*types.Block, *types.PartSet, *types.Commit)
LoadBlock(height int64) *block.Block
SaveBlock(*block.Block, *metadata.PartSet, *metadata.Commit)
Base() int64
Height() int64
}
@@ -56,7 +58,7 @@ type BlockchainReactor struct {
}

type blockApplier interface {
ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error)
ApplyBlock(state state.State, blockID metadata.BlockID, block *block.Block) (state.State, error)
}

// XXX: unify naming in this package around tmState
@@ -227,9 +229,9 @@ func (e rProcessBlock) String() string {
type bcBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2ptypes.NodeID
size int64
block *types.Block
block *block.Block
}

func (resp bcBlockResponse) String() string {
@@ -241,7 +243,7 @@ func (resp bcBlockResponse) String() string {
type bcNoBlockResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2ptypes.NodeID
height int64
}

@@ -254,7 +256,7 @@ func (resp bcNoBlockResponse) String() string {
type bcStatusResponse struct {
priorityNormal
time time.Time
peerID types.NodeID
peerID p2ptypes.NodeID
base int64
height int64
}
@@ -267,7 +269,7 @@ func (resp bcStatusResponse) String() string {
// new peer is connected
type bcAddNewPeer struct {
priorityNormal
peerID types.NodeID
peerID p2ptypes.NodeID
}

func (resp bcAddNewPeer) String() string {
@@ -277,7 +279,7 @@ func (resp bcAddNewPeer) String() string {
// existing peer is removed
type bcRemovePeer struct {
priorityHigh
peerID types.NodeID
peerID p2ptypes.NodeID
reason interface{}
}

@@ -536,7 +538,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
r.mtx.RUnlock()

case *bcproto.Message_BlockResponse:
bi, err := types.BlockFromProto(msg.BlockResponse.Block)
bi, err := block.BlockFromProto(msg.BlockResponse.Block)
if err != nil {
logger.Error("error transitioning block from protobuf", "err", err)
_ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error()))
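The blockStore interface above now speaks *block.Block and *metadata.PartSet/Commit. A minimal in-memory sketch of that interface, illustrative only (the test file further down uses a similar map-backed mock):

type memStore struct {
	blocks map[int64]*block.Block
	base   int64
}

func (m *memStore) LoadBlock(height int64) *block.Block { return m.blocks[height] }

func (m *memStore) SaveBlock(b *block.Block, _ *metadata.PartSet, _ *metadata.Commit) {
	m.blocks[b.Header.Height] = b
}

func (m *memStore) Base() int64 { return m.base }

// Height scans for the highest stored block; a real store tracks this
// incrementally.
func (m *memStore) Height() int64 {
	var max int64
	for h := range m.blocks {
		if h > max {
			max = h
		}
	}
	return max
}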
@@ -13,7 +13,6 @@ import (
"github.com/stretchr/testify/require"
dbm "github.com/tendermint/tm-db"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
cons "github.com/tendermint/tendermint/internal/consensus"
@@ -23,21 +22,25 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
tmstore "github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

type mockPeer struct {
service.Service
id types.NodeID
id p2ptypes.NodeID
}

func (mp mockPeer) FlushStop() {}
func (mp mockPeer) ID() types.NodeID { return mp.id }
func (mp mockPeer) ID() p2ptypes.NodeID { return mp.id }
func (mp mockPeer) RemoteIP() net.IP { return net.IP{} }
func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} }

@@ -45,8 +48,8 @@ func (mp mockPeer) IsOutbound() bool { return true }
func (mp mockPeer) IsPersistent() bool { return true }
func (mp mockPeer) CloseConn() error { return nil }

func (mp mockPeer) NodeInfo() types.NodeInfo {
return types.NodeInfo{
func (mp mockPeer) NodeInfo() p2ptypes.NodeInfo {
return p2ptypes.NodeInfo{
NodeID: "",
ListenAddr: "",
}
@@ -62,7 +65,7 @@ func (mp mockPeer) Get(string) interface{} { return struct{}{} }

//nolint:unused
type mockBlockStore struct {
blocks map[int64]*types.Block
blocks map[int64]*block.Block
}

//nolint:unused
@@ -71,12 +74,12 @@ func (ml *mockBlockStore) Height() int64 {
}

//nolint:unused
func (ml *mockBlockStore) LoadBlock(height int64) *types.Block {
func (ml *mockBlockStore) LoadBlock(height int64) *block.Block {
return ml.blocks[height]
}

//nolint:unused
func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) {
func (ml *mockBlockStore) SaveBlock(block *block.Block, part *metadata.PartSet, commit *metadata.Commit) {
ml.blocks[block.Height] = block
}

@@ -85,7 +88,7 @@ type mockBlockApplier struct {

// XXX: Add whitelist/blacklist?
func (mba *mockBlockApplier) ApplyBlock(
state sm.State, blockID types.BlockID, block *types.Block,
state sm.State, blockID metadata.BlockID, block *block.Block,
) (sm.State, error) {
state.LastBlockHeight++
return state, nil
@@ -113,7 +116,7 @@ func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error {
return nil
}

func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error {
func (sio *mockSwitchIo) sendBlockToPeer(_ *block.Block, _ p2p.Peer) error {
sio.mtx.Lock()
defer sio.mtx.Unlock()
sio.numBlockResponse++
@@ -147,8 +150,8 @@ func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error {

type testReactorParams struct {
logger log.Logger
genDoc *types.GenesisDoc
privVals []types.PrivValidator
genDoc *consensus.GenesisDoc
privVals []consensus.PrivValidator
startHeight int64
mockA bool
}
@@ -419,7 +422,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numStatusResponse)
case bcproto.BlockRequest:
if ev.Height > params.startHeight {
@@ -431,7 +434,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numNoBlockResponse)
} else {
old := mockSwitch.numBlockResponse
@@ -442,7 +445,7 @@ func TestReactorHelperMode(t *testing.T) {
msgBz, err := proto.Marshal(msgProto)
require.NoError(t, err)

reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz)
reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz)
assert.Equal(t, old+1, mockSwitch.numBlockResponse)
}
}
@@ -475,8 +478,8 @@ type testApp struct {

func newReactorStore(
t *testing.T,
genDoc *types.GenesisDoc,
privVals []types.PrivValidator,
genDoc *consensus.GenesisDoc,
privVals []consensus.PrivValidator,
maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) {
t.Helper()

@@ -502,7 +505,7 @@ func newReactorStore(

// add blocks in
for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ {
lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil)
lastCommit := metadata.NewCommit(blockHeight-1, 0, metadata.BlockID{}, nil)
if blockHeight > 1 {
lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1)
lastBlock := blockStore.LoadBlock(blockHeight - 1)
@@ -514,14 +517,14 @@ func newReactorStore(
time.Now(),
)
require.NoError(t, err)
lastCommit = types.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()})
lastCommit = metadata.NewCommit(vote.Height, vote.Round,
lastBlockMeta.BlockID, []metadata.CommitSig{vote.CommitSig()})
}

thisBlock := sf.MakeBlock(state, blockHeight, lastCommit)

thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes)
blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}
thisParts := thisBlock.MakePartSet(metadata.BlockPartSizeBytes)
blockID := metadata.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}

state, err = blockExec.ApplyBlock(state, blockID, thisBlock)
require.NoError(t, err)
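newReactorStore chains commits: the block at height h carries a commit for h-1, built from the previous block's metadata. The per-height step, excerpted from the hunk above and condensed for reference (helper names as shown in the diff):

// First height: the previous commit is empty but non-nil.
lastCommit := metadata.NewCommit(blockHeight-1, 0, metadata.BlockID{}, nil)

// Later heights: sign a vote for the previous block and wrap it.
lastCommit = metadata.NewCommit(vote.Height, vote.Round,
	lastBlockMeta.BlockID, []metadata.CommitSig{vote.CommitSig()})

// The block's ID commits to both the header hash and the part-set header.
thisParts := thisBlock.MakePartSet(metadata.BlockPartSizeBytes)
blockID := metadata.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()}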
@@ -8,7 +8,8 @@ import (
"sort"
"time"

"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/p2p"
)

// Events generated by the scheduler:
@@ -25,7 +26,7 @@ func (e scFinishedEv) String() string {
// send a blockRequest message
type scBlockRequest struct {
priorityNormal
peerID types.NodeID
peerID p2p.NodeID
height int64
}

@@ -36,8 +37,8 @@ func (e scBlockRequest) String() string {
// a block has been received and validated by the scheduler
type scBlockReceived struct {
priorityNormal
peerID types.NodeID
block *types.Block
peerID p2p.NodeID
block *block.Block
}

func (e scBlockReceived) String() string {
@@ -47,7 +48,7 @@ func (e scBlockReceived) String() string {
// scheduler detected a peer error
type scPeerError struct {
priorityHigh
peerID types.NodeID
peerID p2p.NodeID
reason error
}

@@ -58,7 +59,7 @@ func (e scPeerError) String() string {
// scheduler removed a set of peers (timed out or slow peer)
type scPeersPruned struct {
priorityHigh
peers []types.NodeID
peers []p2p.NodeID
}

func (e scPeersPruned) String() string {
@@ -125,7 +126,7 @@ func (e peerState) String() string {
}

type scPeer struct {
peerID types.NodeID
peerID p2p.NodeID

// initialized as New when peer is added, updated to Ready when statusUpdate is received,
// updated to Removed when peer is removed
@@ -142,7 +143,7 @@ func (p scPeer) String() string {
p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID)
}

func newScPeer(peerID types.NodeID) *scPeer {
func newScPeer(peerID p2p.NodeID) *scPeer {
return &scPeer{
peerID: peerID,
state: peerStateNew,
@@ -170,7 +171,7 @@ type scheduler struct {

// a map of peerID to scheduler specific peer struct `scPeer` used to keep
// track of peer specific state
peers map[types.NodeID]*scPeer
peers map[p2p.NodeID]*scPeer
peerTimeout time.Duration // maximum response time from a peer otherwise prune
minRecvRate int64 // minimum receive rate from peer otherwise prune

@@ -182,13 +183,13 @@ type scheduler struct {
blockStates map[int64]blockState

// a map of heights to the peer we are waiting a response from
pendingBlocks map[int64]types.NodeID
pendingBlocks map[int64]p2p.NodeID

// the time at which a block was put in blockStatePending
pendingTime map[int64]time.Time

// a map of heights to the peers that put the block in blockStateReceived
receivedBlocks map[int64]types.NodeID
receivedBlocks map[int64]p2p.NodeID
}

func (sc scheduler) String() string {
@@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
syncTimeout: 60 * time.Second,
height: initHeight,
blockStates: make(map[int64]blockState),
peers: make(map[types.NodeID]*scPeer),
pendingBlocks: make(map[int64]types.NodeID),
peers: make(map[p2p.NodeID]*scPeer),
pendingBlocks: make(map[int64]p2p.NodeID),
pendingTime: make(map[int64]time.Time),
receivedBlocks: make(map[int64]types.NodeID),
receivedBlocks: make(map[int64]p2p.NodeID),
targetPending: 10, // TODO - pass as param
peerTimeout: 15 * time.Second, // TODO - pass as param
minRecvRate: 0, // int64(7680), TODO - pass as param
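The scheduler tracks each height in at most one of its maps: pendingBlocks records which peer owes us the block (with the request time in pendingTime), and receivedBlocks records which peer delivered it. A simplified, self-contained model of that bookkeeping, using only the renamed p2p.NodeID key type; the real transitions live in markPending/markReceived below:

// Fragment, assumed to run inside a function; not the commit's code.
pending := make(map[int64]p2p.NodeID)
pendingAt := make(map[int64]time.Time)
received := make(map[int64]p2p.NodeID)

// Request height 6 from P1: blockStateNew -> blockStatePending.
pending[6], pendingAt[6] = p2p.NodeID("P1"), time.Now()

// Response from P1 arrives: blockStatePending -> blockStateReceived,
// accepted only if P1 is the peer we asked.
if from, ok := pending[6]; ok && from == "P1" {
	delete(pending, 6)
	delete(pendingAt, 6)
	received[6] = "P1"
}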
@@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler {
return &sc
}

func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer {
func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer {
if _, ok := sc.peers[peerID]; !ok {
sc.peers[peerID] = newScPeer(peerID)
}
return sc.peers[peerID]
}

func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error {
peer, ok := sc.peers[peerID]
if !ok {
return fmt.Errorf("couldn't find peer %s", peerID)
@@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error {
return nil
}

func (sc *scheduler) removePeer(peerID types.NodeID) {
func (sc *scheduler) removePeer(peerID p2p.NodeID) {
peer, ok := sc.peers[peerID]
if !ok {
return
@@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() {
}
}

func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error {
func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error {
peer := sc.ensurePeer(peerID)

if peer.state == peerStateRemoved {
@@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState {
}
}

func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
peers := make([]types.NodeID, 0)
func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID {
peers := make([]p2p.NodeID, 0)
for _, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID {
return peers
}

func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID {
prunable := make([]types.NodeID, 0)
func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID {
prunable := make([]p2p.NodeID, 0)
for peerID, peer := range sc.peers {
if peer.state != peerStateReady {
continue
@@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) {
}

// CONTRACT: peer exists and in Ready state.
func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error {
func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error {
peer := sc.peers[peerID]

if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID {
@@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64,
return nil
}

func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error {
func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error {
state := sc.getStateAtHeight(height)
if state != blockStateNew {
return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state)
@@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 {
return min
}

func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 {
var heights []int64
for height, pendingPeerID := range sc.pendingBlocks {
if pendingPeerID == peerID {
@@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 {
return heights
}

func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) {
peers := sc.getPeersWithHeight(height)
if len(peers) == 0 {
return "", fmt.Errorf("cannot find peer for height %d", height)
@@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {

// create a map from number of pending requests to a list
// of peers having that number of pending requests.
pendingFrom := make(map[int][]types.NodeID)
pendingFrom := make(map[int][]p2p.NodeID)
for _, peerID := range peers {
numPending := len(sc.pendingFrom(peerID))
pendingFrom[numPending] = append(pendingFrom[numPending], peerID)
@@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) {
}

// PeerByID is a list of peers sorted by peerID.
type PeerByID []types.NodeID
type PeerByID []p2p.NodeID

func (peers PeerByID) Len() int {
return len(peers)
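PeerByID gives a deterministic ordering over the string-backed p2p.NodeID; the diff only shows Len, but in the original file the type implements the full sort.Interface. A hedged usage sketch, assuming Less and Swap are defined as in that file:

peers := []p2p.NodeID{"P3", "P1", "P2"}
sort.Sort(PeerByID(peers))
// peers is now {"P1", "P2", "P3"}; selectPeer relies on a stable order
// like this to break ties among equally loaded peers deterministically.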
@@ -10,8 +10,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
"github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

type scTestParams struct {
@@ -19,9 +21,9 @@ type scTestParams struct {
initHeight int64
height int64
allB []int64
pending map[int64]types.NodeID
pending map[int64]p2p.NodeID
pendingTime map[int64]time.Time
received map[int64]types.NodeID
received map[int64]p2p.NodeID
peerTimeout time.Duration
minRecvRate int64
targetPending int
@@ -40,7 +42,7 @@ func verifyScheduler(sc *scheduler) {
}

func newTestScheduler(params scTestParams) *scheduler {
peers := make(map[types.NodeID]*scPeer)
peers := make(map[p2p.NodeID]*scPeer)
var maxHeight int64

initHeight := params.initHeight
@@ -53,8 +55,8 @@ func newTestScheduler(params scTestParams) *scheduler {
}

for id, peer := range params.peers {
peer.peerID = types.NodeID(id)
peers[types.NodeID(id)] = peer
peer.peerID = p2p.NodeID(id)
peers[p2p.NodeID(id)] = peer
if maxHeight < peer.height {
maxHeight = peer.height
}
@@ -121,7 +123,7 @@ func TestScMaxHeights(t *testing.T) {
name: "one ready peer",
sc: scheduler{
height: 3,
peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}},
},
wantMax: 6,
},
@@ -129,7 +131,7 @@ func TestScMaxHeights(t *testing.T) {
name: "ready and removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateReady},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -139,7 +141,7 @@ func TestScMaxHeights(t *testing.T) {
name: "removed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: 4, state: peerStateRemoved},
"P2": {height: 10, state: peerStateRemoved}},
},
@@ -149,7 +151,7 @@ func TestScMaxHeights(t *testing.T) {
name: "new peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {base: -1, height: -1, state: peerStateNew},
"P2": {base: -1, height: -1, state: peerStateNew}},
},
@@ -159,7 +161,7 @@ func TestScMaxHeights(t *testing.T) {
name: "mixed peers",
sc: scheduler{
height: 1,
peers: map[types.NodeID]*scPeer{
peers: map[p2p.NodeID]*scPeer{
"P1": {height: -1, state: peerStateNew},
"P2": {height: 10, state: peerStateReady},
"P3": {height: 20, state: peerStateRemoved},
@@ -186,7 +188,7 @@ func TestScMaxHeights(t *testing.T) {
func TestScEnsurePeer(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -243,7 +245,7 @@ func TestScTouchPeer(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
time time.Time
}

@@ -315,13 +317,13 @@ func TestScPrunablePeers(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "mixed peers",
@@ -340,7 +342,7 @@ func TestScPrunablePeers(t *testing.T) {
"P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90},
}},
args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100},
wantResult: []types.NodeID{"P4", "P5", "P6"},
wantResult: []p2p.NodeID{"P4", "P5", "P6"},
},
}

@@ -360,7 +362,7 @@ func TestScPrunablePeers(t *testing.T) {
func TestScRemovePeer(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
}
tests := []struct {
name string
@@ -423,13 +425,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
},
{
@@ -437,13 +439,13 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}},
allB: []int64{},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -451,15 +453,15 @@ func TestScRemovePeer(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 3: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}},
allB: []int64{},
pending: map[int64]types.NodeID{},
received: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
received: map[int64]p2p.NodeID{},
},
},
{
@@ -470,8 +472,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"},
},
args: args{peerID: "P1"},
wantFields: scTestParams{
@@ -480,8 +482,8 @@ func TestScRemovePeer(t *testing.T) {
"P2": {height: 6, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4, 5, 6},
pending: map[int64]types.NodeID{3: "P2"},
received: map[int64]types.NodeID{4: "P2", 5: "P2"},
pending: map[int64]p2p.NodeID{3: "P2"},
received: map[int64]p2p.NodeID{4: "P2", 5: "P2"},
},
},
}
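The pruning tests above encode the rule: only a Ready peer can be pruned, and it is pruned when it has been silent longer than the threshold or its receive rate has dropped below minSpeed (P4, P5 timed out; P6 is too slow at rate 90 against minSpeed 100). A standalone restatement of that predicate, illustrative only; the implementation's exact rate guard may differ:

func isPrunable(state peerState, lastTouched time.Time, lastRate int64,
	now time.Time, threshold time.Duration, minSpeed int64) bool {
	if state != peerStateReady {
		return false // New and Removed peers are never pruned
	}
	return now.Sub(lastTouched) > threshold || (lastRate != 0 && lastRate < minSpeed)
}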
@@ -500,7 +502,7 @@ func TestScRemovePeer(t *testing.T) {
func TestScSetPeerRange(t *testing.T) {

type args struct {
peerID types.NodeID
peerID p2p.NodeID
base int64
height int64
}
@@ -621,25 +623,25 @@ func TestScGetPeersWithHeight(t *testing.T) {
name string
fields scTestParams
args args
wantResult []types.NodeID
wantResult []p2p.NodeID
}{
{
name: "no peers",
fields: scTestParams{peers: map[string]*scPeer{}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only new peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}},
args: args{height: 10},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "only Removed peers",
fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}},
args: args{height: 2},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready shorter peer",
@@ -648,7 +650,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 5},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "one Ready equal peer",
@@ -657,7 +659,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer",
@@ -667,7 +669,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer at base",
@@ -677,7 +679,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{"P1"},
wantResult: []p2p.NodeID{"P1"},
},
{
name: "one Ready higher peer with higher base",
@@ -687,7 +689,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{1, 2, 3, 4},
},
args: args{height: 4},
wantResult: []types.NodeID{},
wantResult: []p2p.NodeID{},
},
{
name: "multiple mixed peers",
@@ -702,7 +704,7 @@ func TestScGetPeersWithHeight(t *testing.T) {
allB: []int64{8, 9, 10, 11},
},
args: args{height: 8},
wantResult: []types.NodeID{"P2", "P5"},
wantResult: []p2p.NodeID{"P2", "P5"},
},
}

@@ -724,7 +726,7 @@ func TestScMarkPending(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
tm time.Time
}
@@ -820,14 +822,14 @@ func TestScMarkPending(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
},
args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)},
},
},
@@ -850,7 +852,7 @@ func TestScMarkReceived(t *testing.T) {
now := time.Now()

type args struct {
peerID types.NodeID
peerID p2p.NodeID
height int64
size int64
tm time.Time
@@ -890,7 +892,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
@@ -899,7 +901,7 @@ func TestScMarkReceived(t *testing.T) {
"P2": {height: 4, state: peerStateReady},
},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"},
},
wantErr: true,
},
@@ -908,13 +910,13 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{},
pending: map[int64]p2p.NodeID{},
},
wantErr: true,
},
@@ -923,14 +925,14 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)},
},
wantErr: true,
@@ -940,16 +942,16 @@ func TestScMarkReceived(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now},
},
args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)},
wantFields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: now},
received: map[int64]types.NodeID{2: "P1"},
received: map[int64]p2p.NodeID{2: "P1"},
},
},
}
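TestScMarkReceived pins down the pending-to-received transition: the event is rejected unless the height is pending from exactly that peer and the response time is strictly after the request time. A simplified sketch of the accept path; the rate formula here is an assumption for illustration, not the commit's exact bookkeeping:

elapsed := tm.Sub(pendingTime[height]).Seconds()
if elapsed <= 0 {
	return fmt.Errorf("response time precedes request time")
}
// Assumed formula: bytes per second since the request went out.
peer.lastRate = int64(float64(size) / elapsed)
delete(pendingBlocks, height)
delete(pendingTime, height)
receivedBlocks[height] = peerID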
@@ -990,7 +992,7 @@ func TestScMarkProcessed(t *testing.T) {
height: 2,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
targetPending: 1,
},
@@ -1008,15 +1010,15 @@ func TestScMarkProcessed(t *testing.T) {
height: 1,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{1, 2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
received: map[int64]types.NodeID{1: "P1"}},
received: map[int64]p2p.NodeID{1: "P1"}},
args: args{height: 1},
wantFields: scTestParams{
height: 2,
peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}},
allB: []int64{2},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now}},
},
}
@@ -1100,7 +1102,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantResult: false,
@@ -1110,7 +1112,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantResult: false,
},
@@ -1121,7 +1123,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
peers: map[string]*scPeer{
"P1": {height: 4, state: peerStateReady}},
allB: []int64{4},
received: map[int64]types.NodeID{4: "P1"},
received: map[int64]p2p.NodeID{4: "P1"},
},
wantResult: true,
},
@@ -1130,7 +1132,7 @@ func TestScAllBlocksProcessed(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{2: now, 4: now},
},
wantResult: false,
@@ -1178,7 +1180,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now},
},
wantHeight: -1,
@@ -1189,7 +1191,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"},
},
wantHeight: -1,
},
@@ -1208,7 +1210,7 @@ func TestScNextHeightToSchedule(t *testing.T) {
initHeight: 1,
peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}},
allB: []int64{1, 2, 3, 4},
pending: map[int64]types.NodeID{2: "P1"},
pending: map[int64]p2p.NodeID{2: "P1"},
pendingTime: map[int64]time.Time{2: now},
},
wantHeight: 1,
@@ -1238,7 +1240,7 @@ func TestScSelectPeer(t *testing.T) {
name string
fields scTestParams
args args
wantResult types.NodeID
wantResult p2p.NodeID
wantError bool
}{
{
@@ -1306,7 +1308,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P2": {height: 9, state: peerStateReady}},
allB: []int64{4, 5, 6, 7, 8, 9},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
4: "P1", 6: "P1",
5: "P2",
},
@@ -1322,7 +1324,7 @@ func TestScSelectPeer(t *testing.T) {
"P1": {height: 15, state: peerStateReady},
"P3": {height: 15, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -1348,8 +1350,8 @@ func TestScSelectPeer(t *testing.T) {
}

// makeScBlock makes an empty block.
func makeScBlock(height int64) *types.Block {
return &types.Block{Header: types.Header{Height: height}}
func makeScBlock(height int64) *block.Block {
return &block.Block{Header: metadata.Header{Height: height}}
}

// used in place of assert.Equal(t, want, actual) to avoid failures due to
@@ -1391,7 +1393,7 @@ func TestScHandleBlockResponse(t *testing.T) {
now := time.Now()
block6FromP1 := bcBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
size: 100,
block: makeScBlock(6),
}
@@ -1432,7 +1434,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1443,7 +1445,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now.Add(time.Second)},
},
args: args{event: block6FromP1},
@@ -1454,7 +1456,7 @@ func TestScHandleBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: block6FromP1},
@@ -1476,7 +1478,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
now := time.Now()
noBlock6FromP1 := bcNoBlockResponse{
time: now.Add(time.Millisecond),
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}

@@ -1512,14 +1514,14 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: noOpEvent{},
wantFields: scTestParams{
peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P2"},
pending: map[int64]p2p.NodeID{6: "P2"},
pendingTime: map[int64]time.Time{6: now},
},
},
@@ -1528,7 +1530,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
fields: scTestParams{
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")},
@@ -1551,7 +1553,7 @@ func TestScHandleNoBlockResponse(t *testing.T) {
func TestScHandleBlockProcessed(t *testing.T) {
now := time.Now()
processed6FromP1 := pcBlockProcessed{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
height: 6,
}
@@ -1578,7 +1580,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: processed6FromP1},
@@ -1590,7 +1592,7 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: scFinishedEv{},
@@ -1601,8 +1603,8 @@ func TestScHandleBlockProcessed(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{6: "P1"},
},
args: args{event: processed6FromP1},
wantEvent: noOpEvent{},
@@ -1645,7 +1647,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1657,7 +1659,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{6, 7, 8},
pending: map[int64]types.NodeID{6: "P1"},
pending: map[int64]p2p.NodeID{6: "P1"},
pendingTime: map[int64]time.Time{6: now},
},
args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -1669,7 +1671,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 6,
peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}},
allB: []int64{6, 7},
received: map[int64]types.NodeID{6: "P1", 7: "P1"},
received: map[int64]p2p.NodeID{6: "P1", 7: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: scFinishedEv{},
@@ -1680,8 +1682,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
initHeight: 5,
peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}},
wantEvent: noOpEvent{},
@@ -1696,8 +1698,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {
"P3": {height: 8, state: peerStateReady},
},
allB: []int64{5, 6, 7, 8},
pending: map[int64]types.NodeID{7: "P1", 8: "P1"},
received: map[int64]types.NodeID{5: "P1", 6: "P1"},
pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"},
received: map[int64]p2p.NodeID{5: "P1", 6: "P1"},
},
args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}},
wantEvent: noOpEvent{},
@@ -1716,7 +1718,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) {

func TestScHandleAddNewPeer(t *testing.T) {
addP1 := bcAddNewPeer{
peerID: types.NodeID("P1"),
peerID: p2p.NodeID("P1"),
}
type args struct {
event bcAddNewPeer
@@ -1827,7 +1829,7 @@ func TestScHandleTryPrunePeer(t *testing.T) {
allB: []int64{1, 2, 3, 4, 5, 6, 7},
peerTimeout: time.Second},
args: args{event: pruneEv},
wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}},
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}},
},
{
name: "mixed peers, finish after pruning",
@@ -1925,7 +1927,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 4, state: peerStateReady},
"P2": {height: 5, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P2",
},
@@ -1943,7 +1945,7 @@ func TestScHandleTrySchedule(t *testing.T) {
"P1": {height: 8, state: peerStateReady},
"P3": {height: 8, state: peerStateReady}},
allB: []int64{1, 2, 3, 4, 5, 6, 7, 8},
pending: map[int64]types.NodeID{
pending: map[int64]p2p.NodeID{
1: "P1", 2: "P1",
3: "P3", 4: "P3",
5: "P2", 6: "P2",
@@ -2105,7 +2107,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1"},
pending: map[int64]p2p.NodeID{1: "P1"},
pendingTime: map[int64]time.Time{1: tick[1]},
height: 1,
},
@@ -2117,7 +2119,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]},
height: 1,
},
@@ -2129,7 +2131,7 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]},
height: 1,
},
@@ -2141,9 +2143,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{2: "P1", 3: "P1"},
pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]},
received: map[int64]types.NodeID{1: "P1"},
received: map[int64]p2p.NodeID{1: "P1"},
height: 1,
},
},
@@ -2154,9 +2156,9 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}},
allB: []int64{1, 2, 3},
pending: map[int64]types.NodeID{3: "P1"},
pending: map[int64]p2p.NodeID{3: "P1"},
pendingTime: map[int64]time.Time{3: tick[3]},
received: map[int64]types.NodeID{1: "P1", 2: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1"},
height: 1,
},
},
@@ -2167,29 +2169,29 @@ func TestScHandle(t *testing.T) {
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
},
{ // processed block 1
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}},
wantEvent: noOpEvent{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{2, 3},
received: map[int64]types.NodeID{2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{2: "P1", 3: "P1"},
height: 2,
},
},
{ // processed block 2
args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}},
args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}},
wantEvent: scFinishedEv{},
wantSc: &scTestParams{
startTime: now,
peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{3},
received: map[int64]types.NodeID{3: "P1"},
received: map[int64]p2p.NodeID{3: "P1"},
height: 3,
},
},
@@ -2205,7 +2207,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateReady, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3, 4},
received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"},
received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"},
height: 1,
},
args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}},
@@ -2216,7 +2218,7 @@ func TestScHandle(t *testing.T) {
"P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]},
"P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}},
allB: []int64{1, 2, 3},
received: map[int64]types.NodeID{},
received: map[int64]p2p.NodeID{},
height: 1,
},
},
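The TestScHandle sequence above walks a three-block sync end to end: status updates, then block responses, then processed events, with scFinishedEv emitted once every tracked height is processed and verification failures removing the delivering peers. A compact sketch of the driving dispatch; event names come from this diff, but the dispatch details are an assumption, not the commit's code:

switch ev := event.(type) {
case bcBlockResponse:
	// scheduler validated the block; emits scBlockReceived for the processor
case pcBlockProcessed:
	// processor applied the block; scheduler advances and may emit scFinishedEv
case pcBlockVerificationFailure:
	// both delivering peers are removed and their heights rescheduled
default:
	_ = ev // remaining events: peer add/remove, status responses, timers
}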
@@ -11,18 +11,22 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
abcicli "github.com/tendermint/tendermint/abci/client"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/evidence"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
evtypes "github.com/tendermint/tendermint/pkg/evidence"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)

@@ -54,7 +58,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
vals := consensus.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

blockDB := dbm.NewMemDB()
@@ -85,7 +89,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
pv := privVals[i]
cs.SetPrivValidator(pv)

eventBus := types.NewEventBus()
eventBus := events.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
err = eventBus.Start()
require.NoError(t, err)
@@ -100,7 +104,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock

var bzNodeID types.NodeID
var bzNodeID p2ptypes.NodeID

// Set the first state's reactor as the dedicated byzantine reactor and grab
// the NodeID that corresponds to the state so we can reference the reactor.
@@ -125,7 +129,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
)
require.NoError(t, err)

prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{})
require.NoError(t, err)

// send two votes to all peers (1st to one half, 2nd to another half)
@@ -167,12 +171,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
lazyNodeState.Logger.Info("Lazy Proposer proposing condensed commit")
require.NotNil(t, lazyNodeState.privValidator)

var commit *types.Commit
var commit *metadata.Commit
switch {
case lazyNodeState.Height == lazyNodeState.state.InitialHeight:
// We're creating a proposal for the first block.
// The commit is empty, but not nil.
commit = types.NewCommit(0, 0, types.BlockID{}, nil)
commit = metadata.NewCommit(0, 0, metadata.BlockID{}, nil)
case lazyNodeState.LastCommit.HasTwoThirdsMajority():
// Make the commit from LastCommit
commit = lazyNodeState.LastCommit.MakeCommit()
@@ -182,7 +186,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}

// omit the last signature in the commit
commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent()
commit.Signatures[len(commit.Signatures)-1] = metadata.NewCommitSigAbsent()

if lazyNodeState.privValidatorPubKey == nil {
// If this node is a validator & proposer in the current round, it will
@@ -203,8 +207,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {
}

// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID)
propBlockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := consensus.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID)
p := proposal.ToProto()
if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil {
proposal.Signature = p.Signature
@@ -229,20 +233,20 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

// Evidence should be submitted and committed at the third height but
// we will check the first six just in case
evidenceFromEachValidator := make([]types.Evidence, nValidators)
evidenceFromEachValidator := make([]evtypes.Evidence, nValidators)

wg := new(sync.WaitGroup)
i := 0
for _, sub := range rts.subs {
wg.Add(1)

go func(j int, s types.Subscription) {
go func(j int, s events.Subscription) {
defer wg.Done()
for {
select {
case msg := <-s.Out():
require.NotNil(t, msg)
block := msg.Data().(types.EventDataNewBlock).Block
block := msg.Data().(events.EventDataNewBlock).Block
if len(block.Evidence.Evidence) != 0 {
evidenceFromEachValidator[j] = block.Evidence.Evidence[0]
return
@@ -264,7 +268,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) {

for idx, ev := range evidenceFromEachValidator {
if assert.NotNil(t, ev, idx) {
ev, ok := ev.(*types.DuplicateVoteEvidence)
ev, ok := ev.(*evtypes.DuplicateVoteEvidence)
assert.True(t, ok)
assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress)
assert.Equal(t, prevoteHeight, ev.Height())

@@ -20,7 +20,6 @@ import (

abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cstypes "github.com/tendermint/tendermint/internal/consensus/types"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -31,11 +30,15 @@ import (
tmos "github.com/tendermint/tendermint/libs/os"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/privval"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

const (
@@ -86,14 +89,14 @@ type validatorStub struct {
Index int32 // Validator index. NOTE: we don't assume validator set changes.
Height int64
Round int32
types.PrivValidator
consensus.PrivValidator
VotingPower int64
lastVote *types.Vote
lastVote *consensus.Vote
}

const testMinPower int64 = 10

func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *validatorStub {
func newValidatorStub(privValidator consensus.PrivValidator, valIndex int32) *validatorStub {
return &validatorStub{
Index: valIndex,
PrivValidator: privValidator,
@@ -105,21 +108,21 @@ func (vs *validatorStub) signVote(
config *cfg.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader) (*types.Vote, error) {
header metadata.PartSetHeader) (*consensus.Vote, error) {

pubKey, err := vs.PrivValidator.GetPubKey(context.Background())
if err != nil {
return nil, fmt.Errorf("can't get pubkey: %w", err)
}

vote := &types.Vote{
vote := &consensus.Vote{
ValidatorIndex: vs.Index,
ValidatorAddress: pubKey.Address(),
Height: vs.Height,
Round: vs.Round,
Timestamp: tmtime.Now(),
Type: voteType,
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
BlockID: metadata.BlockID{Hash: hash, PartSetHeader: header},
}
v := vote.ToProto()
if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil {
@@ -144,7 +147,7 @@ func signVote(
config *cfg.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader) *types.Vote {
header metadata.PartSetHeader) *consensus.Vote {

v, err := vs.signVote(config, voteType, hash, header)
if err != nil {
@@ -160,9 +163,9 @@ func signVotes(
config *cfg.Config,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
vss ...*validatorStub) []*types.Vote {
votes := make([]*types.Vote, len(vss))
header metadata.PartSetHeader,
vss ...*validatorStub) []*consensus.Vote {
votes := make([]*consensus.Vote, len(vss))
for i, vs := range vss {
votes[i] = signVote(vs, config, voteType, hash, header)
}
@@ -225,7 +228,7 @@ func decideProposal(
vs *validatorStub,
height int64,
round int32,
) (proposal *types.Proposal, block *types.Block) {
) (proposal *consensus.Proposal, block *block.Block) {
cs1.mtx.Lock()
block, blockParts := cs1.createProposalBlock()
validRound := cs1.ValidRound
@@ -236,8 +239,8 @@ func decideProposal(
}

// Make proposal
polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal = types.NewProposal(height, round, polRound, propBlockID)
polRound, propBlockID := validRound, metadata.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal = consensus.NewProposal(height, round, polRound, propBlockID)
p := proposal.ToProto()
if err := vs.SignProposal(context.Background(), chainID, p); err != nil {
panic(err)
@@ -248,7 +251,7 @@ func decideProposal(
return
}

func addVotes(to *State, votes ...*types.Vote) {
func addVotes(to *State, votes ...*consensus.Vote) {
for _, vote := range votes {
to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}}
}
@@ -259,7 +262,7 @@ func signAddVotes(
to *State,
voteType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
header metadata.PartSetHeader,
vss ...*validatorStub,
) {
votes := signVotes(config, voteType, hash, header, vss...)
@@ -271,7 +274,7 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu
pubKey, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
address := pubKey.Address()
var vote *types.Vote
var vote *consensus.Vote
if vote = prevotes.GetByAddress(address); vote == nil {
panic("Failed to find prevote from validator")
}
@@ -291,7 +294,7 @@ func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, bloc
pv, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
address := pv.Address()
var vote *types.Vote
var vote *consensus.Vote
if vote = votes.GetByAddress(address); vote == nil {
panic("Failed to find precommit from validator")
}
@@ -313,7 +316,7 @@ func validatePrecommit(
pv, err := privVal.GetPubKey(context.Background())
require.NoError(t, err)
address := pv.Address()
var vote *types.Vote
var vote *consensus.Vote
if vote = precommits.GetByAddress(address); vote == nil {
panic("Failed to find precommit from validator")
}
@@ -366,14 +369,14 @@ func validatePrevoteAndPrecommit(
}

func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote)
votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, events.EventQueryVote)
if err != nil {
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote))
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, events.EventQueryVote))
}
ch := make(chan tmpubsub.Message)
go func() {
for msg := range votesSub.Out() {
vote := msg.Data().(types.EventDataVote)
vote := msg.Data().(events.EventDataVote)
// we only fire for our own votes
if bytes.Equal(addr, vote.Vote.ValidatorAddress) {
ch <- msg
@@ -386,7 +389,7 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message {
//-------------------------------------------------------------------------------
// consensus states

func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State {
func newState(state sm.State, pv consensus.PrivValidator, app abci.Application) *State {
config := cfg.ResetTestRoot("consensus_state_test")
return newStateWithConfig(config, state, pv, app)
}
@@ -394,7 +397,7 @@ func newState(state sm.State, pv types.PrivValidator, app abci.Application) *Sta
func newStateWithConfig(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
pv consensus.PrivValidator,
app abci.Application,
) *State {
blockStore := store.NewBlockStore(dbm.NewMemDB())
@@ -404,7 +407,7 @@ func newStateWithConfig(
func newStateWithConfigAndBlockStore(
thisConfig *cfg.Config,
state sm.State,
pv types.PrivValidator,
pv consensus.PrivValidator,
app abci.Application,
blockStore *store.BlockStore,
) *State {
@@ -434,7 +437,7 @@ func newStateWithConfigAndBlockStore(
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

eventBus := types.NewEventBus()
eventBus := events.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
err := eventBus.Start()
if err != nil {
@@ -519,7 +522,7 @@ func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeo
case <-time.After(timeout):
panic(errorMessage)
case msg := <-ch:
roundStateEvent, ok := msg.Data().(types.EventDataRoundState)
roundStateEvent, ok := msg.Data().(events.EventDataRoundState)
if !ok {
panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?",
msg.Data()))
@@ -539,7 +542,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32)
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewRound event")
case msg := <-roundCh:
newRoundEvent, ok := msg.Data().(types.EventDataNewRound)
newRoundEvent, ok := msg.Data().(events.EventDataNewRound)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?",
msg.Data()))
@@ -564,7 +567,7 @@ func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round i
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
proposalEvent, ok := msg.Data().(events.EventDataCompleteProposal)
if !ok {
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
@@ -588,7 +591,7 @@ func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlock event")
case msg := <-blockCh:
blockEvent, ok := msg.Data().(types.EventDataNewBlock)
blockEvent, ok := msg.Data().(events.EventDataNewBlock)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?",
msg.Data()))
@@ -604,7 +607,7 @@ func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHa
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewBlockHeader event")
case msg := <-blockCh:
blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader)
blockHeaderEvent, ok := msg.Data().(events.EventDataNewBlockHeader)
if !ok {
panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?",
msg.Data()))
@@ -623,12 +626,12 @@ func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32
"Timeout expired while waiting for NewUnlock event")
}

func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) {
func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID metadata.BlockID) {
select {
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewProposal event")
case msg := <-proposalCh:
proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal)
proposalEvent, ok := msg.Data().(events.EventDataCompleteProposal)
if !ok {
panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?",
msg.Data()))
@@ -659,7 +662,7 @@ func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32,
case <-time.After(ensureTimeout):
panic("Timeout expired while waiting for NewVote event")
case msg := <-voteCh:
voteEvent, ok := msg.Data().(types.EventDataVote)
voteEvent, ok := msg.Data().(events.EventDataVote)
if !ok {
panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?",
msg.Data()))
@@ -739,7 +742,7 @@ func randConsensusState(
closeFuncs = append(closeFuncs, appCloser.Close)
}

vals := types.TM2PB.ValidatorUpdates(state.Validators)
vals := consensus.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore)
@@ -765,7 +768,7 @@ func randConsensusNetWithPeers(
testName string,
tickerFunc func() TimeoutTicker,
appFunc func(string) abci.Application,
) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) {
) ([]*State, *consensus.GenesisDoc, *cfg.Config, cleanupFunc) {
genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, testMinPower)
css := make([]*State, nPeers)
logger := consensusLogger()
@@ -780,7 +783,7 @@ func randConsensusNetWithPeers(
if i == 0 {
peer0Config = thisConfig
}
var privVal types.PrivValidator
var privVal consensus.PrivValidator
if i < nValidators {
privVal = privVals[i]
} else {
@@ -800,7 +803,7 @@ func randConsensusNetWithPeers(
}

app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i)))
vals := types.TM2PB.ValidatorUpdates(state.Validators)
vals := consensus.TM2PB.ValidatorUpdates(state.Validators)
if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok {
// simulate handshake, receive app version. If don't do this, replay test will fail
state.Version.Consensus.App = kvstore.ProtocolVersion
@@ -823,7 +826,7 @@ func randGenesisState(
config *cfg.Config,
numValidators int,
randPower bool,
minPower int64) (sm.State, []types.PrivValidator) {
minPower int64) (sm.State, []consensus.PrivValidator) {

genDoc, privValidators := factory.RandGenesisDoc(config, numValidators, randPower, minPower)
s0, _ := sm.MakeGenesisState(genDoc)
@@ -891,7 +894,7 @@ func newPersistentKVStoreWithPath(dbDir string) abci.Application {
return kvstore.NewPersistentKVStoreApplication(dbDir)
}

func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool {
func signDataIsEqual(v1 *consensus.Vote, v2 *tmproto.Vote) bool {
if v1 == nil || v2 == nil {
return false
}

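The pattern across the helpers above is mechanical: types.Vote, types.BlockID, and types.PartSetHeader become consensus.Vote, metadata.BlockID, and metadata.PartSetHeader from the new pkg/ tree. A minimal sketch of building and signing a prevote against the relocated types, mirroring the signVote helper above (the function name buildPrevote is hypothetical, and the final Signature copy is assumed from the pre-move helper):

package main

import (
	"context"
	"fmt"

	tmtime "github.com/tendermint/tendermint/libs/time"
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/metadata"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

// buildPrevote is a hypothetical helper: Vote and PrivValidator now live in
// pkg/consensus, while BlockID and PartSetHeader live in pkg/metadata.
func buildPrevote(pv consensus.PrivValidator, chainID string, height int64, round int32, hash []byte) (*consensus.Vote, error) {
	pubKey, err := pv.GetPubKey(context.Background())
	if err != nil {
		return nil, fmt.Errorf("can't get pubkey: %w", err)
	}
	vote := &consensus.Vote{
		ValidatorAddress: pubKey.Address(),
		Height:           height,
		Round:            round,
		Timestamp:        tmtime.Now(),
		Type:             tmproto.PrevoteType,
		BlockID:          metadata.BlockID{Hash: hash},
	}
	v := vote.ToProto()
	if err := pv.SignVote(context.Background(), chainID, v); err != nil {
		return nil, err
	}
	vote.Signature = v.Signature // assumed: copy the proto signature back, as the original helper does
	return vote, nil
}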
@@ -9,9 +9,11 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

func TestReactorInvalidPrecommit(t *testing.T) {
@@ -59,7 +61,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
for _, sub := range rts.subs {
wg.Add(1)

go func(s types.Subscription) {
go func(s events.Subscription) {
<-s.Out()
wg.Done()
}(sub)
@@ -69,7 +71,7 @@ func TestReactorInvalidPrecommit(t *testing.T) {
wg.Wait()
}

func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) {
func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv consensus.PrivValidator) {
// routine to:
// - precommit for a random block
// - send precommit to all peers
@@ -86,16 +88,16 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r

// precommit a random block
blockHash := bytes.HexBytes(tmrand.Bytes(32))
precommit := &types.Vote{
precommit := &consensus.Vote{
ValidatorAddress: addr,
ValidatorIndex: valIndex,
Height: cs.Height,
Round: cs.Round,
Timestamp: cs.voteTime(),
Type: tmproto.PrecommitType,
BlockID: types.BlockID{
BlockID: metadata.BlockID{
Hash: blockHash,
PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}},
PartSetHeader: metadata.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}},
}

p := precommit.ToProto()

@@ -14,11 +14,12 @@ import (
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/code"
abci "github.com/tendermint/tendermint/abci/types"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

// for testing
@@ -37,7 +38,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock)
startTestRound(cs, height, round)

ensureNewEventOnChannel(newBlockCh) // first block gets committed
@@ -60,7 +61,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {

assertMempool(cs.txNotifier).EnableTxsAvailable()

newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock)
startTestRound(cs, cs.Height, cs.Round)

ensureNewEventOnChannel(newBlockCh) // first block gets committed
@@ -79,10 +80,10 @@ func TestMempoolProgressInHigherRound(t *testing.T) {
cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication())
assertMempool(cs.txNotifier).EnableTxsAvailable()
height, round := cs.Height, cs.Round
newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock)
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
cs.setProposal = func(proposal *types.Proposal) error {
newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock)
newRoundCh := subscribe(cs.eventBus, events.EventQueryNewRound)
timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose)
cs.setProposal = func(proposal *consensus.Proposal) error {
if cs.Height == 2 && cs.Round == 0 {
// dont set the proposal in round 0 so we timeout and
// go to next round
@@ -129,7 +130,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore)
err := stateStore.Save(state)
require.NoError(t, err)
newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader)
newBlockHeaderCh := subscribe(cs.eventBus, events.EventQueryNewBlockHeader)

const numTxs int64 = 3000
go deliverTxsRange(cs, 0, int(numTxs))
@@ -138,7 +139,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) {
for n := int64(0); n < numTxs; {
select {
case msg := <-newBlockHeaderCh:
headerEvent := msg.Data().(types.EventDataNewBlockHeader)
headerEvent := msg.Data().(events.EventDataNewBlockHeader)
n += headerEvent.NumTxs
case <-time.After(30 * time.Second):
t.Fatal("Timed out waiting 30s to commit blocks with transactions")

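The mempool tests show the same migration for the event system: queries and payload types move from types to pkg/events. A minimal subscriber sketch under that assumption (the subscriber name is illustrative):

package main

import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/pkg/events"
)

// watchNewBlocks subscribes with the relocated query constant and asserts the
// relocated event data type, as the tests above do.
func watchNewBlocks(bus *events.EventBus) error {
	sub, err := bus.Subscribe(context.Background(), "example-subscriber", events.EventQueryNewBlock, 10)
	if err != nil {
		return err
	}
	for msg := range sub.Out() {
		newBlock := msg.Data().(events.EventDataNewBlock).Block
		fmt.Println("committed block at height", newBlock.Height)
	}
	return nil
}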
@@ -3,7 +3,7 @@ package consensus
import (
"github.com/go-kit/kit/metrics"
"github.com/go-kit/kit/metrics/discard"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"

prometheus "github.com/go-kit/kit/metrics/prometheus"
stdprometheus "github.com/prometheus/client_golang/prometheus"
@@ -221,7 +221,7 @@ func NopMetrics() *Metrics {
}

// RecordConsMetrics uses for recording the block related metrics during fast-sync.
func (m *Metrics) RecordConsMetrics(block *types.Block) {
func (m *Metrics) RecordConsMetrics(block *block.Block) {
m.NumTxs.Set(float64(len(block.Data.Txs)))
m.TotalTxs.Add(float64(len(block.Data.Txs)))
m.BlockSizeBytes.Observe(float64(block.Size()))

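With the hunk above, RecordConsMetrics takes a *block.Block from the new pkg/block package instead of *types.Block, so any caller recording fast-sync metrics needs the new import. A hypothetical call site:

package consensus

import "github.com/tendermint/tendermint/pkg/block"

// recordBlock is a hypothetical caller: the metric fields themselves are
// unchanged (tx count, cumulative txs, block size in bytes).
func recordBlock(m *Metrics, b *block.Block) {
	m.RecordConsMetrics(b)
}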
@@ -8,9 +8,12 @@ import (
"github.com/tendermint/tendermint/libs/bits"
tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

// Message defines an interface that the consensus domain types implement. When
@@ -95,7 +98,7 @@ func (m *NewRoundStepMessage) String() string {
type NewValidBlockMessage struct {
Height int64
Round int32
BlockPartSetHeader types.PartSetHeader
BlockPartSetHeader metadata.PartSetHeader
BlockParts *bits.BitArray
IsCommit bool
}
@@ -119,8 +122,8 @@ func (m *NewValidBlockMessage) ValidateBasic() error {
m.BlockParts.Size(),
m.BlockPartSetHeader.Total)
}
if m.BlockParts.Size() > int(types.MaxBlockPartsCount) {
return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount)
if m.BlockParts.Size() > int(metadata.MaxBlockPartsCount) {
return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), metadata.MaxBlockPartsCount)
}
return nil
}
@@ -133,7 +136,7 @@ func (m *NewValidBlockMessage) String() string {

// ProposalMessage is sent when a new block is proposed.
type ProposalMessage struct {
Proposal *types.Proposal
Proposal *consensus.Proposal
}

// ValidateBasic performs basic validation.
@@ -164,8 +167,8 @@ func (m *ProposalPOLMessage) ValidateBasic() error {
if m.ProposalPOL.Size() == 0 {
return errors.New("empty ProposalPOL bit array")
}
if m.ProposalPOL.Size() > types.MaxVotesCount {
return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount)
if m.ProposalPOL.Size() > consensus.MaxVotesCount {
return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), consensus.MaxVotesCount)
}
return nil
}
@@ -179,7 +182,7 @@ func (m *ProposalPOLMessage) String() string {
type BlockPartMessage struct {
Height int64
Round int32
Part *types.Part
Part *metadata.Part
}

// ValidateBasic performs basic validation.
@@ -203,7 +206,7 @@ func (m *BlockPartMessage) String() string {

// VoteMessage is sent when voting for a proposal (or lack thereof).
type VoteMessage struct {
Vote *types.Vote
Vote *consensus.Vote
}

// ValidateBasic performs basic validation.
@@ -232,7 +235,7 @@ func (m *HasVoteMessage) ValidateBasic() error {
if m.Round < 0 {
return errors.New("negative Round")
}
if !types.IsVoteTypeValid(m.Type) {
if !consensus.IsVoteTypeValid(m.Type) {
return errors.New("invalid Type")
}
if m.Index < 0 {
@@ -251,7 +254,7 @@ type VoteSetMaj23Message struct {
Height int64
Round int32
Type tmproto.SignedMsgType
BlockID types.BlockID
BlockID metadata.BlockID
}

// ValidateBasic performs basic validation.
@@ -262,7 +265,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error {
if m.Round < 0 {
return errors.New("negative Round")
}
if !types.IsVoteTypeValid(m.Type) {
if !consensus.IsVoteTypeValid(m.Type) {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
@@ -283,7 +286,7 @@ type VoteSetBitsMessage struct {
Height int64
Round int32
Type tmproto.SignedMsgType
BlockID types.BlockID
BlockID metadata.BlockID
Votes *bits.BitArray
}

@@ -292,7 +295,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error {
if m.Height < 0 {
return errors.New("negative Height")
}
if !types.IsVoteTypeValid(m.Type) {
if !consensus.IsVoteTypeValid(m.Type) {
return errors.New("invalid Type")
}
if err := m.BlockID.ValidateBasic(); err != nil {
@@ -300,8 +303,8 @@ func (m *VoteSetBitsMessage) ValidateBasic() error {
}

// NOTE: Votes.Size() can be zero if the node does not have any
if m.Votes.Size() > types.MaxVotesCount {
return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount)
if m.Votes.Size() > consensus.MaxVotesCount {
return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), consensus.MaxVotesCount)
}

return nil
@@ -465,7 +468,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
LastCommitRound: msg.NewRoundStep.LastCommitRound,
}
case *tmcons.Message_NewValidBlock:
pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
pbPartSetHeader, err := metadata.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader)
if err != nil {
return nil, fmt.Errorf("parts header to proto error: %w", err)
}
@@ -484,7 +487,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
IsCommit: msg.NewValidBlock.IsCommit,
}
case *tmcons.Message_Proposal:
pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal)
pbP, err := consensus.ProposalFromProto(&msg.Proposal.Proposal)
if err != nil {
return nil, fmt.Errorf("proposal msg to proto error: %w", err)
}
@@ -504,7 +507,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
ProposalPOL: pbBits,
}
case *tmcons.Message_BlockPart:
parts, err := types.PartFromProto(&msg.BlockPart.Part)
parts, err := metadata.PartFromProto(&msg.BlockPart.Part)
if err != nil {
return nil, fmt.Errorf("blockpart msg to proto error: %w", err)
}
@@ -514,7 +517,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
Part: parts,
}
case *tmcons.Message_Vote:
vote, err := types.VoteFromProto(msg.Vote.Vote)
vote, err := consensus.VoteFromProto(msg.Vote.Vote)
if err != nil {
return nil, fmt.Errorf("vote msg to proto error: %w", err)
}
@@ -530,7 +533,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
Index: msg.HasVote.Index,
}
case *tmcons.Message_VoteSetMaj23:
bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
bi, err := metadata.BlockIDFromProto(&msg.VoteSetMaj23.BlockID)
if err != nil {
return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err)
}
@@ -541,7 +544,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) {
BlockID: *bi,
}
case *tmcons.Message_VoteSetBits:
bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID)
bi, err := metadata.BlockIDFromProto(&msg.VoteSetBits.BlockID)
if err != nil {
return nil, fmt.Errorf("block ID to proto error: %w", err)
}
@@ -574,7 +577,7 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) {
var pb tmcons.WALMessage

switch msg := msg.(type) {
case types.EventDataRoundState:
case events.EventDataRoundState:
pb = tmcons.WALMessage{
Sum: &tmcons.WALMessage_EventDataRoundState{
EventDataRoundState: &tmproto.EventDataRoundState{
@@ -637,7 +640,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {

switch msg := msg.Sum.(type) {
case *tmcons.WALMessage_EventDataRoundState:
pb = types.EventDataRoundState{
pb = events.EventDataRoundState{
Height: msg.EventDataRoundState.Height,
Round: msg.EventDataRoundState.Round,
Step: msg.EventDataRoundState.Step,
@@ -650,7 +653,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) {
}
pb = msgInfo{
Msg: walMsg,
PeerID: types.NodeID(msg.MsgInfo.PeerID),
PeerID: p2p.NodeID(msg.MsgInfo.PeerID),
}

case *tmcons.WALMessage_TimeoutInfo:

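msgs.go now sources its limits and identifiers from the split packages: consensus.MaxVotesCount and consensus.IsVoteTypeValid, metadata.BlockID and metadata.MaxBlockPartsCount. A sketch that trips the relocated vote-count bound through VoteSetBitsMessage.ValidateBasic, matching the test vectors in the next file (the wrapper function is hypothetical):

package consensus // the internal consensus package that declares VoteSetBitsMessage

import (
	"fmt"

	"github.com/tendermint/tendermint/libs/bits"
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/metadata"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func checkOversizedVoteSetBits() {
	msg := &VoteSetBitsMessage{
		Height:  1,
		Round:   0,
		Type:    tmproto.PrevoteType,
		BlockID: metadata.BlockID{},
		Votes:   bits.NewBitArray(consensus.MaxVotesCount + 1),
	}
	// Expected: "votes bit array is too big: 10001, max: 10000"
	fmt.Println(msg.ValidateBasic())
}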
@@ -18,18 +18,21 @@ import (
"github.com/tendermint/tendermint/libs/bits"
"github.com/tendermint/tendermint/libs/bytes"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

func TestMsgToProto(t *testing.T) {
psh := types.PartSetHeader{
psh := metadata.PartSetHeader{
Total: 1,
Hash: tmrand.Bytes(32),
}
pbPsh := psh.ToProto()
bi := types.BlockID{
bi := metadata.BlockID{
Hash: tmrand.Bytes(32),
PartSetHeader: psh,
}
@@ -37,7 +40,7 @@ func TestMsgToProto(t *testing.T) {
bits := bits.NewBitArray(1)
pbBits := bits.ToProto()

parts := types.Part{
parts := metadata.Part{
Index: 1,
Bytes: []byte("test"),
Proof: merkle.Proof{
@@ -50,7 +53,7 @@ func TestMsgToProto(t *testing.T) {
pbParts, err := parts.ToProto()
require.NoError(t, err)

proposal := types.Proposal{
proposal := consensus.Proposal{
Type: tmproto.ProposalType,
Height: 1,
Round: 1,
@@ -61,9 +64,9 @@ func TestMsgToProto(t *testing.T) {
}
pbProposal := proposal.ToProto()

pv := types.NewMockPV()
pv := consensus.NewMockPV()
vote, err := factory.MakeVote(pv, factory.DefaultTestChainID,
0, 1, 0, 2, types.BlockID{}, time.Now())
0, 1, 0, 2, metadata.BlockID{}, time.Now())
require.NoError(t, err)
pbVote := vote.ToProto()

@@ -210,7 +213,7 @@ func TestMsgToProto(t *testing.T) {

func TestWALMsgProto(t *testing.T) {

parts := types.Part{
parts := metadata.Part{
Index: 1,
Bytes: []byte("test"),
Proof: merkle.Proof{
@@ -229,7 +232,7 @@ func TestWALMsgProto(t *testing.T) {
want *tmcons.WALMessage
wantErr bool
}{
{"successful EventDataRoundState", types.EventDataRoundState{
{"successful EventDataRoundState", events.EventDataRoundState{
Height: 2,
Round: 1,
Step: "ronies",
@@ -248,7 +251,7 @@ func TestWALMsgProto(t *testing.T) {
Round: 1,
Part: &parts,
},
PeerID: types.NodeID("string"),
PeerID: p2p.NodeID("string"),
}, &tmcons.WALMessage{
Sum: &tmcons.WALMessage_MsgInfo{
MsgInfo: &tmcons.MsgInfo{
@@ -316,13 +319,13 @@ func TestWALMsgProto(t *testing.T) {
// nolint:lll //ignore line length for tests
func TestConsMsgsVectors(t *testing.T) {
date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC)
psh := types.PartSetHeader{
psh := metadata.PartSetHeader{
Total: 1,
Hash: []byte("add_more_exclamation_marks_code-"),
}
pbPsh := psh.ToProto()

bi := types.BlockID{
bi := metadata.BlockID{
Hash: []byte("add_more_exclamation_marks_code-"),
PartSetHeader: psh,
}
@@ -330,7 +333,7 @@ func TestConsMsgsVectors(t *testing.T) {
bits := bits.NewBitArray(1)
pbBits := bits.ToProto()

parts := types.Part{
parts := metadata.Part{
Index: 1,
Bytes: []byte("test"),
Proof: merkle.Proof{
@@ -343,7 +346,7 @@ func TestConsMsgsVectors(t *testing.T) {
pbParts, err := parts.ToProto()
require.NoError(t, err)

proposal := types.Proposal{
proposal := consensus.Proposal{
Type: tmproto.ProposalType,
Height: 1,
Round: 1,
@@ -354,7 +357,7 @@ func TestConsMsgsVectors(t *testing.T) {
}
pbProposal := proposal.ToProto()

v := &types.Vote{
v := &consensus.Vote{
ValidatorAddress: []byte("add_more_exclamation"),
ValidatorIndex: 1,
Height: 1,
@@ -431,10 +434,10 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
invalidSignedMsgType tmproto.SignedMsgType = 0x03
)

validBlockID := types.BlockID{}
invalidBlockID := types.BlockID{
validBlockID := metadata.BlockID{}
invalidBlockID := metadata.BlockID{
Hash: bytes.HexBytes{},
PartSetHeader: types.PartSetHeader{
PartSetHeader: metadata.PartSetHeader{
Total: 1,
Hash: []byte{0},
},
@@ -446,7 +449,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) {
messageHeight int64
testName string
messageType tmproto.SignedMsgType
messageBlockID types.BlockID
messageBlockID metadata.BlockID
}{
{false, 0, 0, "Valid Message", validSignedMsgType, validBlockID},
{true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID},
@@ -479,15 +482,15 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
{func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"},
{func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"},
{func(msg *VoteSetBitsMessage) {
msg.BlockID = types.BlockID{
msg.BlockID = metadata.BlockID{
Hash: bytes.HexBytes{},
PartSetHeader: types.PartSetHeader{
PartSetHeader: metadata.PartSetHeader{
Total: 1,
Hash: []byte{0},
},
}
}, "wrong BlockID: wrong PartSetHeader: wrong Hash:"},
{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) },
{func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(consensus.MaxVotesCount + 1) },
"votes bit array is too big: 10001, max: 10000"},
}

@@ -499,7 +502,7 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) {
Round: 0,
Type: 0x01,
Votes: bits.NewBitArray(1),
BlockID: types.BlockID{},
BlockID: metadata.BlockID{},
}

tc.malleateFn(msg)
@@ -604,7 +607,9 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) {
"empty blockParts",
},
{
func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) },
func(msg *NewValidBlockMessage) {
msg.BlockParts = bits.NewBitArray(int(metadata.MaxBlockPartsCount) + 1)
},
"blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1",
},
}
@@ -615,7 +620,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) {
msg := &NewValidBlockMessage{
Height: 1,
Round: 0,
BlockPartSetHeader: types.PartSetHeader{
BlockPartSetHeader: metadata.PartSetHeader{
Total: 1,
},
BlockParts: bits.NewBitArray(1),
@@ -639,7 +644,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) {
{func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"},
{func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"},
{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"},
{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount + 1) },
{func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(consensus.MaxVotesCount + 1) },
"proposalPOL bit array is too big: 10001, max: 10000"},
}

@@ -662,13 +667,13 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) {
}

func TestBlockPartMessageValidateBasic(t *testing.T) {
testPart := new(types.Part)
testPart := new(metadata.Part)
testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf"))
testCases := []struct {
testName string
messageHeight int64
messageRound int32
messagePart *types.Part
messagePart *metadata.Part
expectErr bool
}{
{"Valid Message", 0, 0, testPart, false},
@@ -689,7 +694,7 @@ func TestBlockPartMessageValidateBasic(t *testing.T) {
})
}

message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)}
message := BlockPartMessage{Height: 0, Round: 0, Part: new(metadata.Part)}
message.Part.Index = 1

assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result")

@@ -12,8 +12,10 @@ import (
tmjson "github.com/tendermint/tendermint/libs/json"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

var (
@@ -36,7 +38,7 @@ func (pss peerStateStats) String() string {
// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go.
// Be mindful of what you Expose.
type PeerState struct {
peerID types.NodeID
peerID p2p.NodeID
logger log.Logger

// NOTE: Modify below using setters, never directly.
@@ -50,7 +52,7 @@ type PeerState struct {
}

// NewPeerState returns a new PeerState for the given node ID.
func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState {
func NewPeerState(logger log.Logger, peerID p2p.NodeID) *PeerState {
return &PeerState{
peerID: peerID,
logger: logger,
@@ -110,7 +112,7 @@ func (ps *PeerState) GetHeight() int64 {
}

// SetHasProposal sets the given proposal as known for the peer.
func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {
func (ps *PeerState) SetHasProposal(proposal *consensus.Proposal) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

@@ -137,7 +139,7 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) {

// InitProposalBlockParts initializes the peer's proposal block parts header
// and bit array.
func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) {
func (ps *PeerState) InitProposalBlockParts(partSetHeader metadata.PartSetHeader) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

@@ -165,7 +167,7 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in
// vote was picked.
//
// NOTE: `votes` must be the correct Size() for the Height().
func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, bool) {
func (ps *PeerState) PickVoteToSend(votes consensus.VoteSetReader) (*consensus.Vote, bool) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

@@ -199,8 +201,21 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, boo
return nil, false
}

func (ps *PeerState) PickVoteFromCommit(commit *metadata.Commit) (*consensus.Vote, bool) {
psVotes := ps.getVoteBitArray(commit.Height, commit.Round, tmproto.PrecommitType)
if psVotes == nil {
return nil, false // not something worth sending
}

if index, ok := commit.BitArray().Sub(psVotes).PickRandom(); ok {
return consensus.GetVoteFromCommit(commit, int32(index)), true
}

return nil, false
}

func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray {
if !types.IsVoteTypeValid(votesType) {
if !consensus.IsVoteTypeValid(votesType) {
return nil
}

@@ -357,7 +372,7 @@ func (ps *PeerState) BlockPartsSent() int {
}

// SetHasVote sets the given vote as known by the peer
func (ps *PeerState) SetHasVote(vote *types.Vote) {
func (ps *PeerState) SetHasVote(vote *consensus.Vote) {
ps.mtx.Lock()
defer ps.mtx.Unlock()

@@ -404,7 +419,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {

if psHeight != msg.Height || psRound != msg.Round {
ps.PRS.Proposal = false
ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
ps.PRS.ProposalBlockPartSetHeader = metadata.PartSetHeader{}
ps.PRS.ProposalBlockParts = nil
ps.PRS.ProposalPOLRound = -1
ps.PRS.ProposalPOL = nil

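PickVoteFromCommit is new on this branch: instead of treating a stored commit as a VoteSetReader, it reconstructs a single precommit the peer is missing from the commit's signatures. A minimal caller sketch (helper name hypothetical); the reactor's sendVoteFromCommit in the next file wraps exactly this:

package consensus

import (
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/metadata"
)

// catchupVote returns a precommit the peer has not yet seen, or nil when the
// peer already has every signature in the commit.
func catchupVote(ps *PeerState, commit *metadata.Commit) *consensus.Vote {
	if vote, ok := ps.PickVoteFromCommit(commit); ok {
		return vote
	}
	return nil
}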
@@ -12,10 +12,13 @@ import (
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

var (
@@ -125,11 +128,11 @@ type Reactor struct {
service.BaseService

state *State
eventBus *types.EventBus
eventBus *events.EventBus
Metrics *Metrics

mtx tmsync.RWMutex
peers map[types.NodeID]*PeerState
peers map[p2ptypes.NodeID]*PeerState
waitSync bool

stateCh *p2p.Channel
@@ -165,7 +168,7 @@ func NewReactor(
r := &Reactor{
state: cs,
waitSync: waitSync,
peers: make(map[types.NodeID]*PeerState),
peers: make(map[p2ptypes.NodeID]*PeerState),
Metrics: NopMetrics(),
stateCh: stateCh,
dataCh: dataCh,
@@ -260,7 +263,7 @@ func (r *Reactor) OnStop() {
}

// SetEventBus sets the reactor's event bus.
func (r *Reactor) SetEventBus(b *types.EventBus) {
func (r *Reactor) SetEventBus(b *events.EventBus) {
r.eventBus = b
r.state.SetEventBus(b)
}
@@ -313,7 +316,7 @@ conR:
%+v`, err, r.state, r))
}

d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
d := events.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the blocksync complete event", "err", err)
}
@@ -346,7 +349,7 @@ func (r *Reactor) StringIndented(indent string) string {
}

// GetPeerState returns PeerState for a given NodeID.
func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) {
func (r *Reactor) GetPeerState(peerID p2ptypes.NodeID) (*PeerState, bool) {
r.mtx.RLock()
defer r.mtx.RUnlock()

@@ -375,7 +378,7 @@ func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
}
}

func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
func (r *Reactor) broadcastHasVoteMessage(vote *consensus.Vote) {
r.stateCh.Out <- p2p.Envelope{
Broadcast: true,
Message: &tmcons.HasVote{
@@ -393,7 +396,7 @@ func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
func (r *Reactor) subscribeToBroadcastEvents() {
err := r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventNewRoundStepValue,
events.EventNewRoundStepValue,
func(data tmevents.EventData) {
r.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
select {
@@ -408,7 +411,7 @@ func (r *Reactor) subscribeToBroadcastEvents() {

err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventValidBlockValue,
events.EventValidBlockValue,
func(data tmevents.EventData) {
r.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
},
@@ -419,9 +422,9 @@ func (r *Reactor) subscribeToBroadcastEvents() {

err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
types.EventVoteValue,
events.EventVoteValue,
func(data tmevents.EventData) {
r.broadcastHasVoteMessage(data.(*types.Vote))
r.broadcastHasVoteMessage(data.(*consensus.Vote))
},
)
if err != nil {
@@ -443,7 +446,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
}
}

func (r *Reactor) sendNewRoundStepMessage(peerID types.NodeID) {
func (r *Reactor) sendNewRoundStepMessage(peerID p2ptypes.NodeID) {
rs := r.state.GetRoundState()
msg := makeRoundStepMessage(rs)
r.stateCh.Out <- p2p.Envelope{
@@ -653,7 +656,7 @@ OUTER_LOOP:

// pickSendVote picks a vote and sends it to the peer. It will return true if
// there is a vote to send and false otherwise.
func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool {
func (r *Reactor) pickSendVote(ps *PeerState, votes consensus.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
r.Logger.Debug("sending vote message", "ps", ps, "vote", vote)
r.voteCh.Out <- p2p.Envelope{
@@ -670,6 +673,23 @@ func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool {
return false
}

func (r *Reactor) sendVoteFromCommit(ps *PeerState, commit *metadata.Commit) bool {
if vote, ok := ps.PickVoteFromCommit(commit); ok {
r.Logger.Debug("sending vote message", "ps", ps, "vote", vote)
r.voteCh.Out <- p2p.Envelope{
To: ps.peerID,
Message: &tmcons.Vote{
Vote: vote.ToProto(),
},
}

ps.SetHasVote(vote)
return true
}

return false
}

func (r *Reactor) gossipVotesForHeight(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool {
logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID)

@@ -781,8 +801,10 @@ OUTER_LOOP:
if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase {
// Load the block commit for prs.Height, which contains precommit
// signatures for prs.Height.
// FIXME: It's incredibly inefficient to be sending individual votes to a node that is lagging behind.
// We should instead be gossiping entire commits
if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil {
if r.pickSendVote(ps, commit) {
if r.sendVoteFromCommit(ps, commit) {
logger.Debug("picked Catchup commit to send", "height", prs.Height)
continue OUTER_LOOP
}

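reactor.go picks up the same pkg/events move for the blocksync status notification. A sketch of the publish path shown in the hunk above, with a hypothetical helper name:

package consensus

import "github.com/tendermint/tendermint/pkg/events"

// publishSynced emits the relocated blocksync-complete event; both the data
// type and the publish method appear in the reactor diff above.
func publishSynced(bus *events.EventBus, height int64) error {
	d := events.EventDataBlockSyncStatus{Complete: true, Height: height}
	return bus.PublishEventBlockSyncStatus(d)
}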
@@ -15,7 +15,6 @@ import (
|
||||
|
||||
abcicli "github.com/tendermint/tendermint/abci/client"
|
||||
"github.com/tendermint/tendermint/abci/example/kvstore"
|
||||
abci "github.com/tendermint/tendermint/abci/types"
|
||||
cfg "github.com/tendermint/tendermint/config"
|
||||
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
|
||||
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
|
||||
@@ -26,11 +25,16 @@ import (
|
||||
"github.com/tendermint/tendermint/internal/test/factory"
|
||||
"github.com/tendermint/tendermint/libs/log"
|
||||
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
|
||||
"github.com/tendermint/tendermint/pkg/abci"
|
||||
"github.com/tendermint/tendermint/pkg/block"
|
||||
"github.com/tendermint/tendermint/pkg/consensus"
|
||||
"github.com/tendermint/tendermint/pkg/events"
|
||||
"github.com/tendermint/tendermint/pkg/evidence"
|
||||
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
|
||||
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
statemocks "github.com/tendermint/tendermint/state/mocks"
|
||||
"github.com/tendermint/tendermint/store"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
dbm "github.com/tendermint/tm-db"
|
||||
)
|
||||
|
||||
@@ -40,14 +44,14 @@ var (
|
||||
|
||||
type reactorTestSuite struct {
|
||||
network *p2ptest.Network
|
||||
states map[types.NodeID]*State
|
||||
reactors map[types.NodeID]*Reactor
|
||||
subs map[types.NodeID]types.Subscription
|
||||
blocksyncSubs map[types.NodeID]types.Subscription
|
||||
stateChannels map[types.NodeID]*p2p.Channel
|
||||
dataChannels map[types.NodeID]*p2p.Channel
|
||||
voteChannels map[types.NodeID]*p2p.Channel
|
||||
voteSetBitsChannels map[types.NodeID]*p2p.Channel
|
||||
states map[p2ptypes.NodeID]*State
|
||||
reactors map[p2ptypes.NodeID]*Reactor
|
||||
subs map[p2ptypes.NodeID]events.Subscription
|
||||
blocksyncSubs map[p2ptypes.NodeID]events.Subscription
|
||||
stateChannels map[p2ptypes.NodeID]*p2p.Channel
|
||||
dataChannels map[p2ptypes.NodeID]*p2p.Channel
|
||||
voteChannels map[p2ptypes.NodeID]*p2p.Channel
|
||||
voteSetBitsChannels map[p2ptypes.NodeID]*p2p.Channel
|
||||
}
|
||||
|
||||
func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor {
|
||||
@@ -61,10 +65,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
|
||||
|
||||
rts := &reactorTestSuite{
|
||||
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
|
||||
states: make(map[types.NodeID]*State),
|
||||
reactors: make(map[types.NodeID]*Reactor, numNodes),
|
||||
subs: make(map[types.NodeID]types.Subscription, numNodes),
|
||||
blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes),
|
||||
states: make(map[p2ptypes.NodeID]*State),
|
||||
reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes),
|
||||
subs: make(map[p2ptypes.NodeID]events.Subscription, numNodes),
|
||||
blocksyncSubs: make(map[p2ptypes.NodeID]events.Subscription, numNodes),
|
||||
}
|
||||
|
||||
rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size)
@@ -91,10 +95,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu

reactor.SetEventBus(state.eventBus)

blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size)
blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryNewBlock, size)
require.NoError(t, err)

fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size)
fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryBlockSyncStatus, size)
require.NoError(t, err)

rts.states[nodeID] = state
@@ -132,7 +136,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu
return rts
}

func validateBlock(block *types.Block, activeVals map[string]struct{}) error {
func validateBlock(block *block.Block, activeVals map[string]struct{}) error {
if block.LastCommit.Size() != len(activeVals) {
return fmt.Errorf(
"commit size doesn't match number of active validators. Got %d, expected %d",
@@ -153,14 +157,14 @@ func waitForAndValidateBlock(
t *testing.T,
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
blocksSubs []events.Subscription,
states []*State,
txs ...[]byte,
) {

fn := func(j int) {
msg := <-blocksSubs[j].Out()
newBlock := msg.Data().(types.EventDataNewBlock).Block
newBlock := msg.Data().(events.EventDataNewBlock).Block

require.NoError(t, validateBlock(newBlock, activeVals))

@@ -186,7 +190,7 @@ func waitForAndValidateBlockWithTx(
t *testing.T,
n int,
activeVals map[string]struct{},
blocksSubs []types.Subscription,
blocksSubs []events.Subscription,
states []*State,
txs ...[]byte,
) {
@@ -196,7 +200,7 @@ func waitForAndValidateBlockWithTx(
BLOCK_TX_LOOP:
for {
msg := <-blocksSubs[j].Out()
newBlock := msg.Data().(types.EventDataNewBlock).Block
newBlock := msg.Data().(events.EventDataNewBlock).Block

require.NoError(t, validateBlock(newBlock, activeVals))

@@ -231,17 +235,17 @@ func waitForBlockWithUpdatedValsAndValidateIt(
t *testing.T,
n int,
updatedVals map[string]struct{},
blocksSubs []types.Subscription,
blocksSubs []events.Subscription,
css []*State,
) {

fn := func(j int) {
var newBlock *types.Block
var newBlock *block.Block

LOOP:
for {
msg := <-blocksSubs[j].Out()
newBlock = msg.Data().(types.EventDataNewBlock).Block
newBlock = msg.Data().(events.EventDataNewBlock).Block
if newBlock.LastCommit.Size() == len(updatedVals) {
break LOOP
}
@@ -265,7 +269,7 @@ func waitForBlockWithUpdatedValsAndValidateIt(

func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) {
t.Helper()
status, ok := msg.Data().(types.EventDataBlockSyncStatus)
status, ok := msg.Data().(events.EventDataBlockSyncStatus)

require.True(t, ok)
require.Equal(t, complete, status.Complete)
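// Illustrative usage (assumed): events.Subscription keeps the Out() channel
// contract these helpers rely on, so a caller checks sync status like:
//
//	msg := <-sub.Out()
//	ensureBlockSyncStatus(t, msg, true, 0) // asserts events.EventDataBlockSyncStatus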
@@ -293,7 +297,7 @@ func TestReactorBasic(t *testing.T) {
wg.Add(1)

// wait till everyone makes the first new block
go func(s types.Subscription) {
go func(s events.Subscription) {
defer wg.Done()
<-s.Out()
}(sub)
@@ -305,7 +309,7 @@ func TestReactorBasic(t *testing.T) {
wg.Add(1)

// wait till everyone makes the consensus switch
go func(s types.Subscription) {
go func(s events.Subscription) {
defer wg.Done()
msg := <-s.Out()
ensureBlockSyncStatus(t, msg, true, 0)
@@ -338,7 +342,7 @@ func TestReactorWithEvidence(t *testing.T) {

ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal
app := appFunc()
vals := types.TM2PB.ValidatorUpdates(state.Validators)
vals := consensus.TM2PB.ValidatorUpdates(state.Validators)
app.InitChain(abci.RequestInitChain{Validators: vals})

pv := privVals[i]
@@ -360,12 +364,12 @@ func TestReactorWithEvidence(t *testing.T) {
// everyone includes evidence of another double signing
vIdx := (i + 1) % n

ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
ev := evidence.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID())
evpool := &statemocks.EvidencePool{}
evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil)
evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{
evpool.On("CheckEvidence", mock.AnythingOfType("p2ptypes.EvidenceList")).Return(nil)
evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]evidence.Evidence{
ev}, int64(len(ev.Bytes())))
evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return()
evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("p2ptypes.EvidenceList")).Return()

evpool2 := sm.EmptyEvidencePool{}

@@ -374,7 +378,7 @@ func TestReactorWithEvidence(t *testing.T) {
cs.SetLogger(log.TestingLogger().With("module", "consensus"))
cs.SetPrivValidator(pv)

eventBus := types.NewEventBus()
eventBus := events.NewEventBus()
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
err = eventBus.Start()
require.NoError(t, err)
@@ -399,9 +403,9 @@ func TestReactorWithEvidence(t *testing.T) {

// We expect for each validator that is the proposer to propose one piece of
// evidence.
go func(s types.Subscription) {
go func(s events.Subscription) {
msg := <-s.Out()
block := msg.Data().(types.EventDataNewBlock).Block
block := msg.Data().(events.EventDataNewBlock).Block

require.Len(t, block.Evidence.Evidence, 1)
wg.Done()
@@ -452,7 +456,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) {
wg.Add(1)

// wait till everyone makes the first new block
go func(s types.Subscription) {
go func(s events.Subscription) {
<-s.Out()
wg.Done()
}(sub)
@@ -482,7 +486,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) {
wg.Add(1)

// wait till everyone makes the first new block
go func(s types.Subscription) {
go func(s events.Subscription) {
<-s.Out()
wg.Done()
}(sub)
@@ -557,7 +561,7 @@ func TestReactorVotingPowerChange(t *testing.T) {
wg.Add(1)

// wait till everyone makes the first new block
go func(s types.Subscription) {
go func(s events.Subscription) {
<-s.Out()
wg.Done()
}(sub)
@@ -565,7 +569,7 @@ func TestReactorVotingPowerChange(t *testing.T) {

wg.Wait()

blocksSubs := []types.Subscription{}
blocksSubs := []events.Subscription{}
for _, sub := range rts.subs {
blocksSubs = append(blocksSubs, sub)
}
@@ -657,7 +661,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {
wg.Add(1)

// wait till everyone makes the first new block
go func(s types.Subscription) {
go func(s events.Subscription) {
<-s.Out()
wg.Done()
}(sub)
@@ -673,7 +677,7 @@ func TestReactorValidatorSetChanges(t *testing.T) {

newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower)

blocksSubs := []types.Subscription{}
blocksSubs := []events.Subscription{}
for _, sub := range rts.subs {
blocksSubs = append(blocksSubs, sub)
}

@@ -9,12 +9,14 @@ import (
"reflect"
"time"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/crypto/merkle"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

var crc32c = crc32.MakeTable(crc32.Castagnoli)
@@ -36,7 +38,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli)
// Unmarshal and apply a single message to the consensus state as if it were
// received in receiveRoutine. Lines that start with "#" are ignored.
// NOTE: receiveRoutine should not be running.
func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error {
func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub events.Subscription) error {
// Skip meta messages which exist for demarcating boundaries.
if _, ok := msg.Msg.(EndHeightMessage); ok {
return nil
@@ -44,14 +46,14 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr

// for logging
switch m := msg.Msg.(type) {
case types.EventDataRoundState:
case events.EventDataRoundState:
cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step)
// these are playback checks
ticker := time.After(time.Second * 2)
if newStepSub != nil {
select {
case stepMsg := <-newStepSub.Out():
m2 := stepMsg.Data().(types.EventDataRoundState)
m2 := stepMsg.Data().(events.EventDataRoundState)
if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step {
return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m)
}
@@ -202,21 +204,21 @@ type Handshaker struct {
stateStore sm.Store
initialState sm.State
store sm.BlockStore
eventBus types.BlockEventPublisher
genDoc *types.GenesisDoc
eventBus events.BlockEventPublisher
genDoc *consensus.GenesisDoc
logger log.Logger

nBlocks int // number of blocks applied to the state
}

func NewHandshaker(stateStore sm.Store, state sm.State,
store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {
store sm.BlockStore, genDoc *consensus.GenesisDoc) *Handshaker {

return &Handshaker{
stateStore: stateStore,
initialState: state,
store: store,
eventBus: types.NopEventBus{},
eventBus: events.NopEventBus{},
genDoc: genDoc,
logger: log.NewNopLogger(),
nBlocks: 0,
@@ -229,7 +231,7 @@ func (h *Handshaker) SetLogger(l log.Logger) {

// SetEventBus - sets the event bus for publishing block related events.
// If not called, it defaults to types.NopEventBus.
func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) {
func (h *Handshaker) SetEventBus(eventBus events.BlockEventPublisher) {
h.eventBus = eventBus
}
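// Sketch of the assumed wiring (not in the diff): a Handshaker now publishes
// through events.BlockEventPublisher, defaulting to the no-op bus until a
// real one is attached:
//
//	h := NewHandshaker(stateStore, state, blockStore, genDoc)
//	bus := events.NewEventBus()
//	h.SetEventBus(bus) // overrides the default events.NopEventBus{}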

@@ -302,12 +304,12 @@ func (h *Handshaker) ReplayBlocks(

// If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain.
if appBlockHeight == 0 {
validators := make([]*types.Validator, len(h.genDoc.Validators))
validators := make([]*consensus.Validator, len(h.genDoc.Validators))
for i, val := range h.genDoc.Validators {
validators[i] = types.NewValidator(val.PubKey, val.Power)
validators[i] = consensus.NewValidator(val.PubKey, val.Power)
}
validatorSet := types.NewValidatorSet(validators)
nextVals := types.TM2PB.ValidatorUpdates(validatorSet)
validatorSet := consensus.NewValidatorSet(validators)
nextVals := consensus.TM2PB.ValidatorUpdates(validatorSet)
pbParams := h.genDoc.ConsensusParams.ToProto()
req := abci.RequestInitChain{
Time: h.genDoc.GenesisTime,
@@ -333,12 +335,12 @@ func (h *Handshaker) ReplayBlocks(
}
// If the app returned validators or consensus params, update the state.
if len(res.Validators) > 0 {
vals, err := types.PB2TM.ValidatorUpdates(res.Validators)
vals, err := consensus.PB2TM.ValidatorUpdates(res.Validators)
if err != nil {
return nil, err
}
state.Validators = types.NewValidatorSet(vals)
state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1)
state.Validators = consensus.NewValidatorSet(vals)
state.NextValidators = consensus.NewValidatorSet(vals).CopyIncrementProposerPriority(1)
} else if len(h.genDoc.Validators) == 0 {
// If validator set is not set in genesis and still empty after InitChain, exit.
return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain")
@@ -525,13 +527,13 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap
return state, nil
}

func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) {
if !bytes.Equal(appHash, block.AppHash) {
func assertAppHashEqualsOneFromBlock(appHash []byte, b *block.Block) {
if !bytes.Equal(appHash, b.AppHash) {
panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X.

Block: %v
`,
appHash, block.AppHash, block))
appHash, b.AppHash, b))
}
}


@@ -16,10 +16,10 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmos "github.com/tendermint/tendermint/libs/os"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

const (
@@ -54,12 +54,12 @@ func (cs *State) ReplayFile(file string, console bool) error {
// ensure all new step events are regenerated as expected

ctx := context.Background()
newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, events.EventQueryNewRoundStep)
if err != nil {
return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)
return fmt.Errorf("failed to subscribe %s to %v", subscriber, events.EventQueryNewRoundStep)
}
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: events.EventQueryNewRoundStep}
if err := cs.eventBus.Unsubscribe(ctx, args); err != nil {
cs.Logger.Error("Error unsubscribing to event bus", "err", err)
}
@@ -125,7 +125,7 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl
}

// go back count steps by resetting the state and running (pb.count - count) steps
func (pb *playback) replayReset(count int, newStepSub types.Subscription) error {
func (pb *playback) replayReset(count int, newStepSub events.Subscription) error {
if err := pb.cs.Stop(); err != nil {
return err
}
@@ -222,12 +222,12 @@ func (pb *playback) replayConsoleLoop() int {
ctx := context.Background()
// ensure all new step events are regenerated as expected

newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep)
newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, events.EventQueryNewRoundStep)
if err != nil {
tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep))
tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, events.EventQueryNewRoundStep))
}
defer func() {
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep}
args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: events.EventQueryNewRoundStep}
if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil {
pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err)
}
@@ -318,7 +318,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo
tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err))
}

eventBus := types.NewEventBus()
eventBus := events.NewEventBus()
if err := eventBus.Start(); err != nil {
tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err))
}

@@ -3,12 +3,12 @@ package consensus
import (
"context"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/libs/clist"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/mempool"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)
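// Note (editorial): the stub below now draws on two packages that the old code
// conflated: pkg/abci supplies the Request/Response types, pkg/mempool supplies
// the transaction types, and mempl (internal/mempool) still provides the
// Mempool interface being satisfied.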

//-----------------------------------------------------------------------------
@@ -20,14 +20,14 @@ var _ mempl.Mempool = emptyMempool{}
func (emptyMempool) Lock() {}
func (emptyMempool) Unlock() {}
func (emptyMempool) Size() int { return 0 }
func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
func (emptyMempool) CheckTx(_ context.Context, _ mempool.Tx, _ func(*abci.Response), _ mempl.TxInfo) error {
return nil
}
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} }
func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} }
func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) mempool.Txs { return mempool.Txs{} }
func (emptyMempool) ReapMaxTxs(n int) mempool.Txs { return mempool.Txs{} }
func (emptyMempool) Update(
_ int64,
_ types.Txs,
_ mempool.Txs,
_ []*abci.ResponseDeliverTx,
_ mempl.PreCheckFunc,
_ mempl.PostCheckFunc,

@@ -20,7 +20,6 @@ import (
dbm "github.com/tendermint/tm-db"

"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/crypto"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
@@ -28,6 +27,11 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/privval"
tmstate "github.com/tendermint/tendermint/proto/tendermint/state"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
@@ -35,7 +39,6 @@ import (
sm "github.com/tendermint/tendermint/state"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

// These tests ensure we can always recover from failure at any part of the consensus process.
@@ -84,7 +87,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi
// in the WAL itself. Assuming the consensus state is running, replay of any
// WAL, including the empty one, should eventually be followed by a new
// block, or else something is wrong.
newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock)
newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryNewBlock)
require.NoError(t, err)
select {
case <-newBlockSub.Out():
@@ -286,8 +289,8 @@ func (w *crashingWAL) Wait() { w.next.Wait() }
type simulatorTestSuite struct {
GenesisState sm.State
Config *cfg.Config
Chain []*types.Block
Commits []*types.Commit
Chain []*block.Block
Commits []*metadata.Commit
CleanupFunc cleanupFunc

Mempool mempl.Mempool
@@ -331,10 +334,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
sim.GenesisState, _ = sm.MakeGenesisState(genDoc)
sim.CleanupFunc = cleanup

partSize := types.BlockPartSizeBytes
partSize := metadata.BlockPartSizeBytes

newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound)
proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(css[0].eventBus, events.EventQueryNewRound)
proposalCh := subscribe(css[0].eventBus, events.EventQueryCompleteProposal)

vss := make([]*validatorStub, nPeers)
for i := 0; i < nPeers; i++ {
@@ -367,9 +370,9 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
assert.Nil(t, err)
propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts := propBlock.MakePartSet(partSize)
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}

proposal := types.NewProposal(vss[1].Height, round, -1, blockID)
proposal := consensus.NewProposal(vss[1].Height, round, -1, blockID)
p := proposal.ToProto()
if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -399,9 +402,9 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}

proposal = types.NewProposal(vss[2].Height, round, -1, blockID)
proposal = consensus.NewProposal(vss[2].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -438,7 +441,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss := make([]*validatorStub, nVals+1)
copy(newVss, vss[:nVals+1])
sort.Sort(ValidatorStubsByPower(newVss))
@@ -460,7 +463,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {

selfIndex := valIndexFn(0)

proposal = types.NewProposal(vss[3].Height, round, -1, blockID)
proposal = consensus.NewProposal(vss[3].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -517,13 +520,13 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
assert.Nil(t, err)
propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2)
propBlockParts = propBlock.MakePartSet(partSize)
blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
newVss = make([]*validatorStub, nVals+3)
copy(newVss, vss[:nVals+3])
sort.Sort(ValidatorStubsByPower(newVss))

selfIndex = valIndexFn(0)
proposal = types.NewProposal(vss[1].Height, round, -1, blockID)
proposal = consensus.NewProposal(vss[1].Height, round, -1, blockID)
p = proposal.ToProto()
if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil {
t.Fatal("failed to sign bad proposal", err)
@@ -546,8 +549,8 @@ func setupSimulator(t *testing.T) *simulatorTestSuite {
}
ensureNewRound(newRoundCh, height+1, 0)

sim.Chain = make([]*types.Block, 0)
sim.Commits = make([]*types.Commit, 0)
sim.Chain = make([]*block.Block, 0)
sim.Commits = make([]*metadata.Commit, 0)
for i := 1; i <= numBlocks; i++ {
sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i)))
sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i)))
@@ -680,8 +683,8 @@ func tempWALWithData(data []byte) string {
// Make some blocks. Start a fresh app and apply nBlocks blocks.
// Then restart the app and sync it up with the remaining blocks
func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) {
var chain []*types.Block
var commits []*types.Commit
var chain []*block.Block
var commits []*metadata.Commit
var store *mockBlockStore
var stateDB dbm.DB
var genesisState sm.State
@@ -695,7 +698,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod

genesisState = sim.GenesisState
config = sim.Config
chain = append([]*types.Block{}, sim.Chain...) // copy chain
chain = append([]*block.Block{}, sim.Chain...) // copy chain
commits = sim.Commits
store = newMockBlockStore(config, genesisState.ConsensusParams)
} else { // test single node
@@ -813,13 +816,13 @@ func applyBlock(stateStore sm.Store,
mempool mempl.Mempool,
evpool sm.EvidencePool,
st sm.State,
blk *types.Block,
blk *block.Block,
proxyApp proxy.AppConns,
blockStore *mockBlockStore) sm.State {
testPartSize := types.BlockPartSizeBytes
testPartSize := metadata.BlockPartSizeBytes
blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore)

blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
blkID := metadata.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()}
newState, err := blockExec.ApplyBlock(st, blkID, blk)
if err != nil {
panic(err)
@@ -833,7 +836,7 @@ func buildAppStateFromChain(
mempool mempl.Mempool,
evpool sm.EvidencePool,
state sm.State,
chain []*types.Block,
chain []*block.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) {
@@ -844,7 +847,7 @@ func buildAppStateFromChain(
defer proxyApp.Stop() //nolint:errcheck // ignore

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
validators := consensus.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -882,7 +885,7 @@ func buildTMStateFromChain(
evpool sm.EvidencePool,
stateStore sm.Store,
state sm.State,
chain []*types.Block,
chain []*block.Block,
nBlocks int,
mode uint,
blockStore *mockBlockStore) sm.State {
@@ -899,7 +902,7 @@ func buildTMStateFromChain(
defer proxyApp.Stop() //nolint:errcheck

state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version
validators := types.TM2PB.ValidatorUpdates(state.Validators)
validators := consensus.TM2PB.ValidatorUpdates(state.Validators)
if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{
Validators: validators,
}); err != nil {
@@ -1026,7 +1029,7 @@ func (app *badApp) Commit() abci.ResponseCommit {
//--------------------------
// utils for making blocks

func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
func makeBlockchainFromWAL(wal WAL) ([]*block.Block, []*metadata.Commit, error) {
var height int64

// Search for height marker
@@ -1042,10 +1045,10 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
// log.Notice("Build a blockchain by reading from the WAL")

var (
blocks []*types.Block
commits []*types.Commit
thisBlockParts *types.PartSet
thisBlockCommit *types.Commit
blocks []*block.Block
commits []*metadata.Commit
thisBlockParts *metadata.PartSet
thisBlockCommit *metadata.Commit
)

dec := NewWALDecoder(gr)
@@ -1075,7 +1078,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
block, err := block.BlockFromProto(pbb)
if err != nil {
panic(err)
}
@@ -1091,17 +1094,17 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
commits = append(commits, thisBlockCommit)
height++
}
case *types.PartSetHeader:
thisBlockParts = types.NewPartSetFromHeader(*p)
case *types.Part:
case *metadata.PartSetHeader:
thisBlockParts = metadata.NewPartSetFromHeader(*p)
case *metadata.Part:
_, err := thisBlockParts.AddPart(p)
if err != nil {
return nil, nil, err
}
case *types.Vote:
case *consensus.Vote:
if p.Type == tmproto.PrecommitType {
thisBlockCommit = types.NewCommit(p.Height, p.Round,
p.BlockID, []types.CommitSig{p.CommitSig()})
thisBlockCommit = metadata.NewCommit(p.Height, p.Round,
p.BlockID, []metadata.CommitSig{p.CommitSig()})
}
}
}
@@ -1115,18 +1118,18 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) {
if err != nil {
panic(err)
}
block, err := types.BlockFromProto(pbb)
b, err := block.BlockFromProto(pbb)
if err != nil {
panic(err)
}
if block.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1))
if b.Height != height+1 {
panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", b.Height, height+1))
}
commitHeight := thisBlockCommit.Height
if commitHeight != height+1 {
panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1))
}
blocks = append(blocks, block)
blocks = append(blocks, b)
commits = append(commits, thisBlockCommit)
return blocks, commits, nil
}
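// Illustrative call site (assumed from the new signature above):
//
//	blocks, commits, err := makeBlockchainFromWAL(wal)
//	// blocks is []*block.Block, commits is []*metadata.Commit
//	if err != nil {
//		t.Fatal(err)
//	}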
@@ -1171,39 +1174,39 @@ func stateAndStore(

type mockBlockStore struct {
config *cfg.Config
params types.ConsensusParams
chain []*types.Block
commits []*types.Commit
params consensus.ConsensusParams
chain []*block.Block
commits []*metadata.Commit
base int64
}

// TODO: NewBlockStore(db.NewMemDB) ...
func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore {
func newMockBlockStore(config *cfg.Config, params consensus.ConsensusParams) *mockBlockStore {
return &mockBlockStore{config, params, nil, nil, 0}
}

func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) }
func (bs *mockBlockStore) Base() int64 { return bs.base }
func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 }
func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) }
func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block {
func (bs *mockBlockStore) LoadBaseMeta() *block.BlockMeta { return bs.LoadBlockMeta(bs.base) }
func (bs *mockBlockStore) LoadBlock(height int64) *block.Block { return bs.chain[height-1] }
func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *block.Block {
return bs.chain[int64(len(bs.chain))-1]
}
func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
block := bs.chain[height-1]
return &types.BlockMeta{
BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()},
Header: block.Header,
func (bs *mockBlockStore) LoadBlockMeta(height int64) *block.BlockMeta {
b := bs.chain[height-1]
return &block.BlockMeta{
BlockID: metadata.BlockID{Hash: b.Hash(), PartSetHeader: b.MakePartSet(metadata.BlockPartSizeBytes).Header()},
Header: b.Header,
}
}
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) {
func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *metadata.Part { return nil }
func (bs *mockBlockStore) SaveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) {
}
func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit {
func (bs *mockBlockStore) LoadBlockCommit(height int64) *metadata.Commit {
return bs.commits[height-1]
}
func (bs *mockBlockStore) LoadSeenCommit() *types.Commit {
func (bs *mockBlockStore) LoadSeenCommit() *metadata.Commit {
return bs.commits[len(bs.commits)-1]
}
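// Sketch (assumed, limited to methods visible in the hunks above): the store
// surface this mock satisfies now reads roughly as:
//
//	LoadBlock(height int64) *block.Block
//	LoadBlockMeta(height int64) *block.BlockMeta
//	LoadBlockCommit(height int64) *metadata.Commit
//	SaveBlock(b *block.Block, parts *metadata.PartSet, seen *metadata.Commit)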

@@ -1223,8 +1226,8 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) {

func TestHandshakeUpdatesValidators(t *testing.T) {
val, _ := factory.RandValidator(true, 10)
vals := types.NewValidatorSet([]*types.Validator{val})
app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)}
vals := consensus.NewValidatorSet([]*consensus.Validator{val})
app := &initChainApp{vals: consensus.TM2PB.ValidatorUpdates(vals)}
clientCreator := proxy.NewLocalClientCreator(app)

config := ResetConfig("handshake_test_")

|
||||
tmos "github.com/tendermint/tendermint/libs/os"
|
||||
"github.com/tendermint/tendermint/libs/service"
|
||||
tmtime "github.com/tendermint/tendermint/libs/time"
|
||||
"github.com/tendermint/tendermint/pkg/block"
|
||||
types "github.com/tendermint/tendermint/pkg/consensus"
|
||||
"github.com/tendermint/tendermint/pkg/events"
|
||||
"github.com/tendermint/tendermint/pkg/evidence"
|
||||
"github.com/tendermint/tendermint/pkg/metadata"
|
||||
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
|
||||
"github.com/tendermint/tendermint/privval"
|
||||
tmgrpc "github.com/tendermint/tendermint/privval/grpc"
|
||||
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
|
||||
sm "github.com/tendermint/tendermint/state"
|
||||
"github.com/tendermint/tendermint/types"
|
||||
)
|
||||
|
||||
// Consensus sentinel errors
|
||||
@@ -45,8 +50,8 @@ var msgQueueSize = 1000
|
||||
|
||||
// msgs from the reactor which may update the state
|
||||
type msgInfo struct {
|
||||
Msg Message `json:"msg"`
|
||||
PeerID types.NodeID `json:"peer_key"`
|
||||
Msg Message `json:"msg"`
|
||||
PeerID p2ptypes.NodeID `json:"peer_key"`
|
||||
}
|
||||
|
||||
// internally generated messages which may update the state
|
||||
@@ -117,7 +122,7 @@ type State struct {
|
||||
|
||||
// we use eventBus to trigger msg broadcasts in the reactor,
|
||||
// and to notify external subscribers, eg. through a websocket
|
||||
eventBus *types.EventBus
|
||||
eventBus *events.EventBus
|
||||
|
||||
// a Write-Ahead Log ensures we can recover from any kind of crash
|
||||
// and helps us avoid signing conflicting votes
|
||||
@@ -207,7 +212,7 @@ func (cs *State) SetLogger(l log.Logger) {
|
||||
}
|
||||
|
||||
// SetEventBus sets event bus.
|
||||
func (cs *State) SetEventBus(b *types.EventBus) {
|
||||
func (cs *State) SetEventBus(b *events.EventBus) {
|
||||
cs.eventBus = b
|
||||
cs.blockExec.SetEventBus(b)
|
||||
}
|
||||
@@ -309,7 +314,7 @@ func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
|
||||
}
|
||||
|
||||
// LoadCommit loads the commit for a given height.
|
||||
func (cs *State) LoadCommit(height int64) *types.Commit {
|
||||
func (cs *State) LoadCommit(height int64) *metadata.Commit {
|
||||
cs.mtx.RLock()
|
||||
defer cs.mtx.RUnlock()
|
||||
|
||||
@@ -500,7 +505,7 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) {
|
||||
// TODO: should these return anything or let callers just use events?
|
||||
|
||||
// AddVote inputs a vote.
|
||||
func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
|
||||
func (cs *State) AddVote(vote *types.Vote, peerID p2ptypes.NodeID) (added bool, err error) {
|
||||
if peerID == "" {
|
||||
cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
|
||||
} else {
|
||||
@@ -512,7 +517,7 @@ func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err
|
||||
}
|
||||
|
||||
// SetProposal inputs a proposal.
|
||||
func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error {
|
||||
func (cs *State) SetProposal(proposal *types.Proposal, peerID p2ptypes.NodeID) error {
|
||||
|
||||
if peerID == "" {
|
||||
cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
|
||||
@@ -525,7 +530,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) erro
|
||||
}
|
||||
|
||||
// AddProposalBlockPart inputs a part of the proposal block.
|
||||
func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error {
|
||||
func (cs *State) AddProposalBlockPart(height int64, round int32, part *metadata.Part, peerID p2ptypes.NodeID) error {
|
||||
|
||||
if peerID == "" {
|
||||
cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
|
||||
@@ -540,9 +545,9 @@ func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Par
|
||||
// SetProposalAndBlock inputs the proposal and all block parts.
|
||||
func (cs *State) SetProposalAndBlock(
|
||||
proposal *types.Proposal,
|
||||
block *types.Block,
|
||||
parts *types.PartSet,
|
||||
peerID types.NodeID,
|
||||
block *block.Block,
|
||||
parts *metadata.PartSet,
|
||||
peerID p2ptypes.NodeID,
|
||||
) error {
|
||||
|
||||
if err := cs.SetProposal(proposal, peerID); err != nil {
|
||||
@@ -613,7 +618,7 @@ func (cs *State) reconstructLastCommit(state sm.State) {
|
||||
))
|
||||
}
|
||||
|
||||
lastPrecommits := types.CommitToVoteSet(state.ChainID, commit, state.LastValidators)
|
||||
lastPrecommits := types.VoteSetFromCommit(state.ChainID, commit, state.LastValidators)
|
||||
if !lastPrecommits.HasTwoThirdsMajority() {
|
||||
panic("failed to reconstruct last commit; does not have +2/3 maj")
|
||||
}
|
@@ -744,7 +749,7 @@ func (cs *State) newStep() {
cs.Logger.Error("failed publishing new round step", "err", err)
}

cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState)
cs.evsw.FireEvent(events.EventNewRoundStepValue, &cs.RoundState)
}
}

@@ -1161,8 +1166,8 @@ func (cs *State) isProposer(address []byte) bool {
}

func (cs *State) defaultDecideProposal(height int64, round int32) {
var block *types.Block
var blockParts *types.PartSet
var block *block.Block
var blockParts *metadata.PartSet

// Decide on block
if cs.ValidBlock != nil {
@@ -1183,7 +1188,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) {
}

// Make proposal
propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
propBlockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()}
proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID)
p := proposal.ToProto()

@@ -1230,17 +1235,17 @@ func (cs *State) isProposalComplete() bool {
//
// NOTE: keep it side-effect free for clarity.
// CONTRACT: cs.privValidator is not nil.
func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) {
func (cs *State) createProposalBlock() (block *block.Block, blockParts *metadata.PartSet) {
if cs.privValidator == nil {
panic("entered createProposalBlock with privValidator being nil")
}

var commit *types.Commit
var commit *metadata.Commit
switch {
case cs.Height == cs.state.InitialHeight:
// We're creating a proposal for the first block.
// The commit is empty, but not nil.
commit = types.NewCommit(0, 0, types.BlockID{}, nil)
commit = metadata.NewCommit(0, 0, metadata.BlockID{}, nil)

case cs.LastCommit.HasTwoThirdsMajority():
// Make the commit from LastCommit
@@ -1306,7 +1311,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) {
// If ProposalBlock is nil, prevote nil.
if cs.ProposalBlock == nil {
logger.Debug("prevote step: ProposalBlock is nil")
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
cs.signAddVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{})
return
}

@@ -1315,7 +1320,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) {
if err != nil {
// ProposalBlock is invalid, prevote nil.
logger.Error("prevote step: ProposalBlock is invalid", "err", err)
cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{})
cs.signAddVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{})
return
}

@@ -1393,7 +1398,7 @@ func (cs *State) enterPrecommit(height int64, round int32) {
logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil")
}

cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{})
return
}

@@ -1423,7 +1428,7 @@ func (cs *State) enterPrecommit(height int64, round int32) {
}
}

cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{})
return
}

@@ -1474,14 +1479,14 @@ func (cs *State) enterPrecommit(height int64, round int32) {

if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader)
}

if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing event unlock", "err", err)
}

cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{})
cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{})
}

// Enter: any +2/3 precommits for next round.
@@ -1568,13 +1573,13 @@ func (cs *State) enterCommit(height int64, commitRound int32) {
// We're getting the wrong block.
// Set up ProposalBlockParts and keep waiting.
cs.ProposalBlock = nil
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader)

if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
logger.Error("failed publishing valid block", "err", err)
}

cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState)
cs.evsw.FireEvent(events.EventValidBlockValue, &cs.RoundState)
}
}
}
@@ -1690,7 +1695,7 @@ func (cs *State) finalizeCommit(height int64) {
// NOTE The block.AppHash wont reflect these txs until the next block.
stateCopy, err := cs.blockExec.ApplyBlock(
stateCopy,
types.BlockID{
metadata.BlockID{
Hash: block.Hash(),
PartSetHeader: blockParts.Header(),
},
@@ -1726,7 +1731,7 @@ func (cs *State) finalizeCommit(height int64) {
// * cs.StartTime is set to when we will start round0.
}

func (cs *State) RecordMetrics(height int64, block *types.Block) {
func (cs *State) RecordMetrics(height int64, block *block.Block) {
cs.metrics.Validators.Set(float64(cs.Validators.Size()))
cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower()))

@@ -1791,7 +1796,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) {
)

for _, ev := range block.Evidence.Evidence {
if dve, ok := ev.(*types.DuplicateVoteEvidence); ok {
if dve, ok := ev.(*evidence.DuplicateVoteEvidence); ok {
if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil {
byzantineValidatorsCount++
byzantineValidatorsPower += val.VotingPower
@@ -1850,7 +1855,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round.
// TODO: We can check if Proposal is for a different block as this is a sign of misbehavior!
if cs.ProposalBlockParts == nil {
cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader)
cs.ProposalBlockParts = metadata.NewPartSetFromHeader(proposal.BlockID.PartSetHeader)
}

cs.Logger.Info("received proposal", "proposal", proposal)
@@ -1860,7 +1865,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error {
// NOTE: block is not necessarily valid.
// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit,
// once we have the full block.
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) {
func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2ptypes.NodeID) (added bool, err error) {
height, round, part := msg.Height, msg.Round, msg.Part

// Blocks might be reused, so round mismatch is OK
@@ -1904,7 +1909,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
return added, err
}

block, err := types.BlockFromProto(pbb)
block, err := block.BlockFromProto(pbb)
if err != nil {
return added, err
}
@@ -1958,7 +1963,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID
}

// Attempt to add the vote. if its a duplicate signature, dupeout the validator
func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) {
func (cs *State) tryAddVote(vote *types.Vote, peerID p2ptypes.NodeID) (bool, error) {
added, err := cs.addVote(vote, peerID)
if err != nil {
// If the vote height is off, we'll just ignore it,
@@ -2006,7 +2011,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error)
return added, nil
}

func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
func (cs *State) addVote(vote *types.Vote, peerID p2ptypes.NodeID) (added bool, err error) {
cs.Logger.Debug(
"adding vote",
"vote_height", vote.Height,
@@ -2030,11 +2035,11 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
}

cs.Logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort())
if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
if err := cs.eventBus.PublishEventVote(events.EventDataVote{Vote: vote}); err != nil {
return added, err
}

cs.evsw.FireEvent(types.EventVoteValue, vote)
cs.evsw.FireEvent(events.EventVoteValue, vote)

// if we can skip timeoutCommit and have all the votes now,
if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() {
@@ -2060,10 +2065,10 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
return
}

if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil {
if err := cs.eventBus.PublishEventVote(events.EventDataVote{Vote: vote}); err != nil {
return added, err
}
cs.evsw.FireEvent(types.EventVoteValue, vote)
cs.evsw.FireEvent(events.EventVoteValue, vote)

switch vote.Type {
case tmproto.PrevoteType:
@@ -2114,10 +2119,10 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
}

if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) {
cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader)
cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader)
}

cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState)
cs.evsw.FireEvent(events.EventValidBlockValue, &cs.RoundState)
if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil {
return added, err
}
@@ -2184,7 +2189,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err
func (cs *State) signVote(
msgType tmproto.SignedMsgType,
hash []byte,
header types.PartSetHeader,
header metadata.PartSetHeader,
) (*types.Vote, error) {
// Flush the WAL. Otherwise, we may not recompute the same vote to sign,
// and the privValidator will refuse to sign anything.
@@ -2206,7 +2211,7 @@ func (cs *State) signVote(
Round: cs.Round,
Timestamp: cs.voteTime(),
Type: msgType,
BlockID: types.BlockID{Hash: hash, PartSetHeader: header},
BlockID: metadata.BlockID{Hash: hash, PartSetHeader: header},
}

v := vote.ToProto()
@@ -2259,7 +2264,7 @@ func (cs *State) voteTime() time.Time {
}

// sign the vote and publish on internalMsgQueue
func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote {
func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header metadata.PartSetHeader) *types.Vote {
if cs.privValidator == nil { // the node does not have a key
return nil
}
@@ -2332,7 +2337,7 @@ func (cs *State) checkDoubleSigningRisk(height int64) error {
lastCommit := cs.LoadCommit(height - i)
if lastCommit != nil {
for sigIdx, s := range lastCommit.Signatures {
if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) {
if s.BlockIDFlag == metadata.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) {
cs.Logger.Info("found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i)
return ErrSignatureFoundInPastBlocks
}

@@ -17,8 +17,11 @@ import (
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/pkg/metadata"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

/*
@@ -61,8 +64,8 @@ func TestStateProposerSelection0(t *testing.T) {
cs1, vss := randState(config, 4)
height, round := cs1.Height, cs1.Round

newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)

startTestRound(cs1, height, round)

@@ -102,7 +105,7 @@ func TestStateProposerSelection2(t *testing.T) {

cs1, vss := randState(config, 4) // test needs more work for more than 3 validators
height := cs1.Height
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)

// this time we jump in at round 2
incrementRound(vss[1:]...)
@@ -144,7 +147,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) {
height, round := cs.Height, cs.Round

// Listen for propose timeout event
timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose)

startTestRound(cs, height, round)

@@ -165,8 +168,8 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) {

// Listen for propose timeout event

timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose)
proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose)
proposalCh := subscribe(cs.eventBus, events.EventQueryCompleteProposal)

cs.enterNewRound(height, round)
cs.startRoutines(3)
|
||||
height, round := cs1.Height, cs1.Round
|
||||
vs2 := vss[1]
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
voteCh := subscribe(cs1.eventBus, events.EventQueryVote)
|
||||
|
||||
propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2)
|
||||
|
||||
@@ -215,8 +218,8 @@ func TestStateBadProposal(t *testing.T) {
|
||||
stateHash[0] = (stateHash[0] + 1) % 255
|
||||
propBlock.AppHash = stateHash
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := types.NewProposal(vs2.Height, round, -1, blockID)
|
||||
blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := consensus.NewProposal(vs2.Height, round, -1, blockID)
|
||||
p := proposal.ToProto()
|
||||
if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
@@ -257,13 +260,13 @@ func TestStateOversizedBlock(t *testing.T) {
|
||||
height, round := cs1.Height, cs1.Round
|
||||
vs2 := vss[1]
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
voteCh := subscribe(cs1.eventBus, types.EventQueryVote)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
voteCh := subscribe(cs1.eventBus, events.EventQueryVote)
|
||||
|
||||
propBlock, _ := cs1.createProposalBlock()
|
||||
propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)}
|
||||
propBlock.Data.Txs = []mempool.Tx{tmrand.Bytes(2001)}
|
||||
propBlock.Header.DataHash = propBlock.Data.Hash()
|
||||
|
||||
// make the second validator the proposer by incrementing round
|
||||
@@ -271,8 +274,8 @@ func TestStateOversizedBlock(t *testing.T) {
|
||||
incrementRound(vss[1:]...)
|
||||
|
||||
propBlockParts := propBlock.MakePartSet(partSize)
|
||||
blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := types.NewProposal(height, round, -1, blockID)
|
||||
blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()}
|
||||
proposal := consensus.NewProposal(height, round, -1, blockID)
|
||||
p := proposal.ToProto()
|
||||
if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil {
|
||||
t.Fatal("failed to sign bad proposal", err)
|
||||
@@ -324,16 +327,16 @@ func TestStateFullRound1(t *testing.T) {
|
||||
if err := cs.eventBus.Stop(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
eventBus := types.NewEventBusWithBufferCapacity(0)
|
||||
eventBus := events.NewEventBusWithBufferCapacity(0)
|
||||
eventBus.SetLogger(log.TestingLogger().With("module", "events"))
|
||||
cs.SetEventBus(eventBus)
|
||||
if err := eventBus.Start(); err != nil {
|
||||
t.Error(err)
|
||||
}
|
||||
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
|
||||
propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound)
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, events.EventQueryVote)
|
||||
propCh := subscribe(cs.eventBus, events.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs.eventBus, events.EventQueryNewRound)
|
||||
|
||||
// Maybe it would be better to call explicitly startRoutines(4)
|
||||
startTestRound(cs, height, round)
|
||||
@@ -361,7 +364,7 @@ func TestStateFullRoundNil(t *testing.T) {
|
||||
cs, vss := randState(config, 1)
|
||||
height, round := cs.Height, cs.Round
|
||||
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote)
|
||||
voteCh := subscribeUnBuffered(cs.eventBus, events.EventQueryVote)
|
||||
|
||||
cs.enterPrevote(height, round)
|
||||
cs.startRoutines(4)
|
||||
@@ -382,8 +385,8 @@ func TestStateFullRound2(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, events.EventQueryVote)
|
||||
newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlock)
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
startTestRound(cs1, height, round)
|
||||
@@ -424,13 +427,13 @@ func TestStateLockNoPOL(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
voteCh := subscribeUnBuffered(cs1.eventBus, events.EventQueryVote)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
|
||||
/*
|
||||
Round1 (cs1, B) // B B // B B2
|
||||
@@ -614,16 +617,16 @@ func TestStateLockPOLRelock(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader)
|
||||
|
||||
// everything done from perspective of cs1
|
||||
|
||||
@@ -651,7 +654,7 @@ func TestStateLockPOLRelock(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
// before we timeout to the new round set the new proposal
|
||||
cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
|
||||
@@ -713,12 +716,12 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -750,7 +753,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
|
||||
// before we time out into new round, set next proposal block
|
||||
@@ -782,7 +785,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], lockedBlockHash)
|
||||
// now lets add prevotes from everyone else for nil (a polka!)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
// the polka makes us unlock and precommit nil
|
||||
ensureNewUnlock(unlockCh, height, round)
|
||||
@@ -792,7 +795,7 @@ func TestStateLockPOLUnlock(t *testing.T) {
|
||||
// NOTE: since we don't relock on nil, the lock round is -1
|
||||
validatePrecommit(t, cs1, round, -1, vss[0], nil, nil)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3)
|
||||
ensureNewRound(newRoundCh, height, round+1)
|
||||
}
|
||||
|
||||
@@ -807,15 +810,15 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
// everything done from perspective of cs1
|
||||
|
||||
/*
|
||||
@@ -840,7 +843,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash)
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
// before we timeout to the new round set the new proposal
|
||||
cs2 := newState(cs1.state, vs2, kvstore.NewApplication())
|
||||
@@ -884,7 +887,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) {
|
||||
}
|
||||
|
||||
// more prevote creating a majority on the new block and this is then committed
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
// before we timeout to the new round set the new proposal
|
||||
cs3 := newState(cs1.state, vs3, kvstore.NewApplication())
|
||||
@@ -935,12 +938,12 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -965,7 +968,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash()))
|
||||
|
||||
// we do see them precommit nil
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
// cs1 precommit nil
|
||||
ensurePrecommit(voteCh, height, round)
|
||||
@@ -1011,7 +1014,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
// we should have precommitted
|
||||
validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
@@ -1033,7 +1036,7 @@ func TestStateLockPOLSafety1(t *testing.T) {
|
||||
// we should prevote what we're locked on
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
|
||||
newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep)
|
||||
newStepCh := subscribe(cs1.eventBus, events.EventQueryNewRoundStep)
|
||||
|
||||
// before prevotes from the previous round are added
|
||||
// add prevotes from the earlier round
|
||||
@@ -1058,12 +1061,12 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1074,7 +1077,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
_, propBlock0 := decideProposal(cs1, vss[0], height, round)
|
||||
propBlockHash0 := propBlock0.Hash()
|
||||
propBlockParts0 := propBlock0.MakePartSet(partSize)
|
||||
propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()}
|
||||
propBlockID0 := metadata.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()}
|
||||
|
||||
// the others sign a polka but we don't see it
|
||||
prevotes := signVotes(config, tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4)
|
||||
@@ -1107,7 +1110,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1)
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3)
|
||||
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
@@ -1117,7 +1120,7 @@ func TestStateLockPOLSafety2(t *testing.T) {
|
||||
|
||||
round++ // moving to the next round
|
||||
// in round 2 we see the polkad block from round 0
|
||||
newProp := types.NewProposal(height, round, 0, propBlockID0)
|
||||
newProp := consensus.NewProposal(height, round, 0, propBlockID0)
|
||||
p := newProp.ToProto()
|
||||
if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil {
|
||||
t.Fatal(err)
|
||||
@@ -1157,13 +1160,13 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1188,7 +1191,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
// we should have precommitted
|
||||
validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
|
||||
@@ -1205,7 +1208,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
ensurePrevote(voteCh, height, round)
|
||||
validatePrevote(t, cs1, round, vss[0], propBlockHash)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewUnlock(unlockCh, height, round)
|
||||
|
||||
@@ -1216,7 +1219,7 @@ func TestProposeValidBlock(t *testing.T) {
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
round += 2 // moving to the next round
|
||||
|
||||
@@ -1249,12 +1252,12 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1277,7 +1280,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) {
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2)
|
||||
|
||||
// vs3 send prevote nil
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs3)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds())
|
||||
|
||||
@@ -1313,17 +1316,17 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
voteCh := subscribeToVoter(cs1, addr)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
|
||||
round++ // move to round in which P0 is not proposer
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
@@ -1371,14 +1374,14 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
|
||||
// start round
|
||||
startTestRound(cs1, height, round)
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds())
|
||||
ensureNewRound(newRoundCh, height, round+1)
|
||||
@@ -1394,8 +1397,8 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1408,7 +1411,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) {
|
||||
ensurePrevote(voteCh, height, round)
|
||||
|
||||
incrementRound(vss[1:]...)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
round++ // moving to the next round
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
@@ -1432,8 +1435,8 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1446,7 +1449,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) {
|
||||
ensurePrevote(voteCh, height, round)
|
||||
|
||||
incrementRound(vss[1:]...)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
round++ // moving to the next round
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
@@ -1470,8 +1473,8 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, int32(1)
|
||||
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1482,7 +1485,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) {
|
||||
ensureNewRound(newRoundCh, height, round)
|
||||
|
||||
incrementRound(vss[1:]...)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4)
|
||||
signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4)
|
||||
|
||||
ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds())
|
||||
|
||||
@@ -1501,10 +1504,10 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) {
|
||||
|
||||
incrementRound(vs2, vs3, vs4)
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock)
|
||||
|
||||
_, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round)
|
||||
propBlockHash := propBlock.Hash()
|
||||
@@ -1535,11 +1538,11 @@ func TestCommitFromPreviousRound(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, int32(1)
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock)
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
|
||||
prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round)
|
||||
propBlockHash := propBlock.Hash()
|
||||
@@ -1593,12 +1596,12 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
|
||||
precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose)
|
||||
precommitTimeoutCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1623,7 +1626,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
// add precommits
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
|
||||
// wait till timeout occurs
|
||||
@@ -1655,12 +1658,12 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
newBlockHeader := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1684,7 +1687,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash)
|
||||
|
||||
// add precommits
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3)
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4)
|
||||
|
||||
@@ -1715,9 +1718,9 @@ func TestStateSlashingPrevotes(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
@@ -1750,9 +1753,9 @@ func TestStateSlashingPrecommits(t *testing.T) {
|
||||
vs2 := vss[1]
|
||||
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
|
||||
|
||||
// start round and wait for propose and prevote
|
||||
@@ -1796,12 +1799,12 @@ func TestStateHalt1(t *testing.T) {
|
||||
cs1, vss := randState(config, 4)
|
||||
vs2, vs3, vs4 := vss[1], vss[2], vss[3]
|
||||
height, round := cs1.Height, cs1.Round
|
||||
partSize := types.BlockPartSizeBytes
|
||||
partSize := metadata.BlockPartSizeBytes
|
||||
|
||||
proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock)
|
||||
proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal)
|
||||
timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait)
|
||||
newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound)
|
||||
newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlock)
|
||||
pv1, err := cs1.privValidator.GetPubKey(context.Background())
|
||||
require.NoError(t, err)
|
||||
addr := pv1.Address()
|
||||
@@ -1825,7 +1828,7 @@ func TestStateHalt1(t *testing.T) {
|
||||
validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash())
|
||||
|
||||
// add precommits from the rest
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2) // didnt receive proposal
|
||||
signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3)
|
||||
// we receive this later, but vs3 might receive it earlier and with ours will go to commit!
|
||||
precommit4 := signVote(vs4, config, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header())
|
||||
@@ -1867,14 +1870,14 @@ func TestStateOutputsBlockPartsStats(t *testing.T) {
|
||||
peer := p2pmock.NewPeer(nil)
|
||||
|
||||
// 1) new block part
|
||||
parts := types.NewPartSetFromData(tmrand.Bytes(100), 10)
|
||||
parts := metadata.NewPartSetFromData(tmrand.Bytes(100), 10)
|
||||
msg := &BlockPartMessage{
|
||||
Height: 1,
|
||||
Round: 0,
|
||||
Part: parts.GetPart(0),
|
||||
}
|
||||
|
||||
cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header())
|
||||
cs.ProposalBlockParts = metadata.NewPartSetFromHeader(parts.Header())
|
||||
cs.handleMsg(msgInfo{msg, peer.ID()})
|
||||
|
||||
statsMessage := <-cs.statsMsgQueue
|
||||
@@ -1913,7 +1916,7 @@ func TestStateOutputVoteStats(t *testing.T) {
|
||||
|
||||
randBytes := tmrand.Bytes(tmhash.Size)
|
||||
|
||||
vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
|
||||
vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, metadata.PartSetHeader{})
|
||||
|
||||
voteMessage := &VoteMessage{vote}
|
||||
cs.handleMsg(msgInfo{voteMessage, peer.ID()})
|
||||
@@ -1927,7 +1930,7 @@ func TestStateOutputVoteStats(t *testing.T) {
|
||||
|
||||
// sending the vote for the bigger height
|
||||
incrementHeight(vss[1])
|
||||
vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{})
|
||||
vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, metadata.PartSetHeader{})
|
||||
|
||||
cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()})
|
||||
|
||||
@@ -1950,21 +1953,21 @@ func TestSignSameVoteTwice(t *testing.T) {
|
||||
config,
|
||||
tmproto.PrecommitType,
|
||||
randBytes,
|
||||
types.PartSetHeader{Total: 10, Hash: randBytes},
|
||||
metadata.PartSetHeader{Total: 10, Hash: randBytes},
|
||||
)
|
||||
|
||||
vote2 := signVote(vss[1],
|
||||
config,
|
||||
tmproto.PrecommitType,
|
||||
randBytes,
|
||||
types.PartSetHeader{Total: 10, Hash: randBytes},
|
||||
metadata.PartSetHeader{Total: 10, Hash: randBytes},
|
||||
)
|
||||
|
||||
require.Equal(t, vote, vote2)
|
||||
}
|
||||
|
||||
// subscribe subscribes test client to the given query and returns a channel with cap = 1.
|
||||
func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
|
||||
func subscribe(eventBus *events.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
|
||||
sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
|
||||
@@ -1973,7 +1976,7 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa
|
||||
}
|
||||
|
||||
// subscribe subscribes test client to the given query and returns a channel with cap = 0.
|
||||
func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
|
||||
func subscribeUnBuffered(eventBus *events.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message {
|
||||
sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q))
|
||||
|
||||
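The change running through the test hunks above is mechanical: the event bus and its query constants moved from the types package to pkg/events. A minimal sketch of the resulting subscription pattern, assuming the Subscription.Out accessor from libs/pubsub and a hypothetical "test-client" subscriber name:

// Illustrative sketch only, not part of the diff: subscribing to a consensus
// event after the move of EventBus and the EventQuery* values to pkg/events.
func subscribeNewRound(eventBus *events.EventBus) <-chan tmpubsub.Message {
	sub, err := eventBus.Subscribe(context.Background(), "test-client", events.EventQueryNewRound)
	if err != nil {
		panic(fmt.Sprintf("failed to subscribe to %v", events.EventQueryNewRound))
	}
	return sub.Out() // assumes the Subscription.Out accessor from libs/pubsub
}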
@@ -8,13 +8,15 @@ import (

tmjson "github.com/tendermint/tendermint/libs/json"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/metadata"
"github.com/tendermint/tendermint/pkg/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

type RoundVoteSet struct {
Prevotes *types.VoteSet
Precommits *types.VoteSet
Prevotes *consensus.VoteSet
Precommits *consensus.VoteSet
}

var (
@@ -40,15 +42,15 @@ One for their LastCommit round, and another for the official commit round.
type HeightVoteSet struct {
chainID string
height int64
valSet *types.ValidatorSet
valSet *consensus.ValidatorSet

mtx sync.Mutex
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
round int32 // max tracked round
roundVoteSets map[int32]RoundVoteSet // keys: [0...round]
peerCatchupRounds map[p2p.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds
}

func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet {
func NewHeightVoteSet(chainID string, height int64, valSet *consensus.ValidatorSet) *HeightVoteSet {
hvs := &HeightVoteSet{
chainID: chainID,
}
@@ -56,14 +58,14 @@ func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet)
return hvs
}

func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) {
func (hvs *HeightVoteSet) Reset(height int64, valSet *consensus.ValidatorSet) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()

hvs.height = height
hvs.valSet = valSet
hvs.roundVoteSets = make(map[int32]RoundVoteSet)
hvs.peerCatchupRounds = make(map[types.NodeID][]int32)
hvs.peerCatchupRounds = make(map[p2p.NodeID][]int32)

hvs.addRound(0)
hvs.round = 0
@@ -103,8 +105,8 @@ func (hvs *HeightVoteSet) addRound(round int32) {
panic("addRound() for an existing round")
}
// log.Debug("addRound(round)", "round", round)
prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet)
precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet)
prevotes := consensus.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet)
precommits := consensus.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet)
hvs.roundVoteSets[round] = RoundVoteSet{
Prevotes: prevotes,
Precommits: precommits,
@@ -113,10 +115,10 @@ func (hvs *HeightVoteSet) addRound(round int32) {

// Duplicate votes return added=false, err=nil.
// By convention, peerID is "" if origin is self.
func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) {
func (hvs *HeightVoteSet) AddVote(vote *consensus.Vote, peerID p2p.NodeID) (added bool, err error) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(vote.Type) {
if !consensus.IsVoteTypeValid(vote.Type) {
return
}
voteSet := hvs.getVoteSet(vote.Round, vote.Type)
@@ -135,13 +137,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added
return
}

func (hvs *HeightVoteSet) Prevotes(round int32) *types.VoteSet {
func (hvs *HeightVoteSet) Prevotes(round int32) *consensus.VoteSet {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return hvs.getVoteSet(round, tmproto.PrevoteType)
}

func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet {
func (hvs *HeightVoteSet) Precommits(round int32) *consensus.VoteSet {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
return hvs.getVoteSet(round, tmproto.PrecommitType)
@@ -149,7 +151,7 @@ func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet {

// Last round and blockID that has +2/3 prevotes for a particular block or nil.
// Returns -1 if no such round exists.
func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID metadata.BlockID) {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
for r := hvs.round; r >= 0; r-- {
@@ -159,10 +161,10 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) {
return r, polBlockID
}
}
return -1, types.BlockID{}
return -1, metadata.BlockID{}
}

func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet {
func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *consensus.VoteSet {
rvs, ok := hvs.roundVoteSets[round]
if !ok {
return nil
@@ -184,18 +186,18 @@ func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType
func (hvs *HeightVoteSet) SetPeerMaj23(
round int32,
voteType tmproto.SignedMsgType,
peerID types.NodeID,
blockID types.BlockID) error {
peerID p2p.NodeID,
blockID metadata.BlockID) error {
hvs.mtx.Lock()
defer hvs.mtx.Unlock()
if !types.IsVoteTypeValid(voteType) {
if !consensus.IsVoteTypeValid(voteType) {
return fmt.Errorf("setPeerMaj23: Invalid vote type %X", voteType)
}
voteSet := hvs.getVoteSet(round, voteType)
if voteSet == nil {
return nil // something we don't know about yet
}
return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID)
return voteSet.SetPeerMaj23(consensus.P2PID(peerID), blockID)
}

//---------------------------------------------------------
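Putting the relocated vote-tracking types together, here is a hedged sketch of the new HeightVoteSet surface; valSet (*consensus.ValidatorSet) and vote (*consensus.Vote) are assumed to exist, and the empty p2p.NodeID marks a locally originated vote, per the convention noted in the hunk above:

// Illustrative sketch only, not part of the diff.
hvs := NewHeightVoteSet("test-chain", 1, valSet) // valSet is an assumed *consensus.ValidatorSet
added, err := hvs.AddVote(vote, p2p.NodeID(""))  // vote is an assumed *consensus.Vote; "" = self
if err == nil && added {
	prevotes := hvs.Prevotes(vote.Round) // now a *consensus.VoteSet
	_ = prevotes
}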
@@ -11,8 +11,9 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
tmrand "github.com/tendermint/tendermint/libs/rand"
tmtime "github.com/tendermint/tendermint/libs/time"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/metadata"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

var config *cfg.Config // NOTE: must be reset for each _test.go file
@@ -57,7 +58,7 @@ func TestPeerCatchupRounds(t *testing.T) {

}

func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote {
func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []consensus.PrivValidator) *consensus.Vote {
privVal := privVals[valIndex]
pubKey, err := privVal.GetPubKey(context.Background())
if err != nil {
@@ -66,14 +67,14 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty

randBytes := tmrand.Bytes(tmhash.Size)

vote := &types.Vote{
vote := &consensus.Vote{
ValidatorAddress: pubKey.Address(),
ValidatorIndex: valIndex,
Height: height,
Round: round,
Timestamp: tmtime.Now(),
Type: tmproto.PrecommitType,
BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}},
BlockID: metadata.BlockID{Hash: randBytes, PartSetHeader: metadata.PartSetHeader{}},
}
chainID := config.ChainID()
@@ -5,7 +5,7 @@ import (
"time"

"github.com/tendermint/tendermint/libs/bits"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/metadata"
)

//-----------------------------------------------------------------------------
@@ -21,9 +21,9 @@ type PeerRoundState struct {
StartTime time.Time `json:"start_time"`

// True if peer has proposal for this round
Proposal bool `json:"proposal"`
ProposalBlockPartSetHeader types.PartSetHeader `json:"proposal_block_part_set_header"`
ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"`
Proposal bool `json:"proposal"`
ProposalBlockPartSetHeader metadata.PartSetHeader `json:"proposal_block_part_set_header"`
ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"`
// Proposal's POL round. -1 if none.
ProposalPOLRound int32 `json:"proposal_pol_round"`

@@ -57,7 +57,7 @@ func (prs PeerRoundState) Copy() PeerRoundState {

hashCopy := make([]byte, len(headerHash))
copy(hashCopy, headerHash)
prs.ProposalBlockPartSetHeader = types.PartSetHeader{
prs.ProposalBlockPartSetHeader = metadata.PartSetHeader{
Total: prs.ProposalBlockPartSetHeader.Total,
Hash: hashCopy,
}
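Worth noting in the Copy hunk: the receiver is a value and the part-set header hash is duplicated into hashCopy, so a copy can be mutated without aliasing the original. A hedged sketch, with prs assumed to be a populated PeerRoundState:

// Illustrative sketch only, not part of the diff.
dup := prs.Copy()
if len(dup.ProposalBlockPartSetHeader.Hash) > 0 {
	dup.ProposalBlockPartSetHeader.Hash[0] ^= 0xff // prs keeps its original hash
}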
@@ -6,7 +6,10 @@ import (
"time"

"github.com/tendermint/tendermint/libs/bytes"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
)

//-----------------------------------------------------------------------------
@@ -71,37 +74,37 @@ type RoundState struct {
StartTime time.Time `json:"start_time"`

// Subjective time when +2/3 precommits for Block at Round were found
CommitTime time.Time `json:"commit_time"`
Validators *types.ValidatorSet `json:"validators"`
Proposal *types.Proposal `json:"proposal"`
ProposalBlock *types.Block `json:"proposal_block"`
ProposalBlockParts *types.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *types.Block `json:"locked_block"`
LockedBlockParts *types.PartSet `json:"locked_block_parts"`
CommitTime time.Time `json:"commit_time"`
Validators *consensus.ValidatorSet `json:"validators"`
Proposal *consensus.Proposal `json:"proposal"`
ProposalBlock *block.Block `json:"proposal_block"`
ProposalBlockParts *metadata.PartSet `json:"proposal_block_parts"`
LockedRound int32 `json:"locked_round"`
LockedBlock *block.Block `json:"locked_block"`
LockedBlockParts *metadata.PartSet `json:"locked_block_parts"`

// Last known round with POL for non-nil valid block.
ValidRound int32 `json:"valid_round"`
ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above.
ValidBlock *block.Block `json:"valid_block"` // Last known block of POL mentioned above.

// Last known block parts of POL mentioned above.
ValidBlockParts *types.PartSet `json:"valid_block_parts"`
Votes *HeightVoteSet `json:"votes"`
CommitRound int32 `json:"commit_round"` //
LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1
LastValidators *types.ValidatorSet `json:"last_validators"`
TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"`
ValidBlockParts *metadata.PartSet `json:"valid_block_parts"`
Votes *HeightVoteSet `json:"votes"`
CommitRound int32 `json:"commit_round"` //
LastCommit *consensus.VoteSet `json:"last_commit"` // Last precommits at Height-1
LastValidators *consensus.ValidatorSet `json:"last_validators"`
TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"`
}

// Compressed version of the RoundState for use in RPC
type RoundStateSimple struct {
HeightRoundStep string `json:"height/round/step"`
StartTime time.Time `json:"start_time"`
ProposalBlockHash bytes.HexBytes `json:"proposal_block_hash"`
LockedBlockHash bytes.HexBytes `json:"locked_block_hash"`
ValidBlockHash bytes.HexBytes `json:"valid_block_hash"`
Votes json.RawMessage `json:"height_vote_set"`
Proposer types.ValidatorInfo `json:"proposer"`
HeightRoundStep string `json:"height/round/step"`
StartTime time.Time `json:"start_time"`
ProposalBlockHash bytes.HexBytes `json:"proposal_block_hash"`
LockedBlockHash bytes.HexBytes `json:"locked_block_hash"`
ValidBlockHash bytes.HexBytes `json:"valid_block_hash"`
Votes json.RawMessage `json:"height_vote_set"`
Proposer consensus.ValidatorInfo `json:"proposer"`
}

// Compress the RoundState to RoundStateSimple
@@ -121,7 +124,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple {
LockedBlockHash: rs.LockedBlock.Hash(),
ValidBlockHash: rs.ValidBlock.Hash(),
Votes: votesJSON,
Proposer: types.ValidatorInfo{
Proposer: consensus.ValidatorInfo{
Address: addr,
Index: idx,
},
@@ -129,15 +132,15 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple {
}

// NewRoundEvent returns the RoundState with proposer information as an event.
func (rs *RoundState) NewRoundEvent() types.EventDataNewRound {
func (rs *RoundState) NewRoundEvent() events.EventDataNewRound {
addr := rs.Validators.GetProposer().Address
idx, _ := rs.Validators.GetByAddress(addr)

return types.EventDataNewRound{
return events.EventDataNewRound{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step.String(),
Proposer: types.ValidatorInfo{
Proposer: consensus.ValidatorInfo{
Address: addr,
Index: idx,
},
@@ -145,15 +148,15 @@ func (rs *RoundState) NewRoundEvent() types.EventDataNewRound {
}

// CompleteProposalEvent returns information about a proposed block as an event.
func (rs *RoundState) CompleteProposalEvent() types.EventDataCompleteProposal {
func (rs *RoundState) CompleteProposalEvent() events.EventDataCompleteProposal {
// We must construct BlockID from ProposalBlock and ProposalBlockParts
// cs.Proposal is not guaranteed to be set when this function is called
blockID := types.BlockID{
blockID := metadata.BlockID{
Hash: rs.ProposalBlock.Hash(),
PartSetHeader: rs.ProposalBlockParts.Header(),
}

return types.EventDataCompleteProposal{
return events.EventDataCompleteProposal{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step.String(),
@@ -162,8 +165,8 @@ func (rs *RoundState) CompleteProposalEvent() types.EventDataCompleteProposal {
}

// RoundStateEvent returns the H/R/S of the RoundState as an event.
func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
return types.EventDataRoundState{
func (rs *RoundState) RoundStateEvent() events.EventDataRoundState {
return events.EventDataRoundState{
Height: rs.Height,
Round: rs.Round,
Step: rs.Step.String(),
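The three event constructors on RoundState now return payloads from pkg/events rather than types. A minimal sketch of the call sites, assuming rs is a populated *RoundState:

// Illustrative sketch only, not part of the diff.
func emitRoundEvents(rs *RoundState) (events.EventDataNewRound, events.EventDataCompleteProposal, events.EventDataRoundState) {
	return rs.NewRoundEvent(), rs.CompleteProposalEvent(), rs.RoundStateEvent()
}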
@@ -16,11 +16,12 @@ import (
"github.com/tendermint/tendermint/abci/example/kvstore"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/consensus"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/privval"
"github.com/tendermint/tendermint/proxy"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
)

// WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a
@@ -46,7 +47,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
if err != nil {
return err
}
genDoc, err := types.GenesisDocFromFile(config.GenesisFile())
genDoc, err := consensus.GenesisDocFromFile(config.GenesisFile())
if err != nil {
return fmt.Errorf("failed to read genesis file: %w", err)
}
@@ -75,7 +76,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) {
}
})

eventBus := types.NewEventBus()
eventBus := events.NewEventBus()
eventBus.SetLogger(logger.With("module", "events"))
if err := eventBus.Start(); err != nil {
return fmt.Errorf("failed to start event bus: %w", err)
@@ -17,7 +17,8 @@ import (
"github.com/tendermint/tendermint/internal/libs/autofile"
"github.com/tendermint/tendermint/libs/log"
tmtime "github.com/tendermint/tendermint/libs/time"
tmtypes "github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/events"
"github.com/tendermint/tendermint/pkg/metadata"
)

const (
@@ -71,7 +72,7 @@ func TestWALTruncate(t *testing.T) {
dec := NewWALDecoder(gr)
msg, err := dec.Decode()
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
rs, ok := msg.Msg.(events.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, "wrong height")
}
@@ -81,7 +82,7 @@ func TestWALEncoderDecoder(t *testing.T) {
msgs := []TimedWALMessage{
{Time: now, Msg: EndHeightMessage{0}},
{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}},
{Time: now, Msg: tmtypes.EventDataRoundState{Height: 1, Round: 1, Step: ""}},
{Time: now, Msg: events.EventDataRoundState{Height: 1, Round: 1, Step: ""}},
}

b := new(bytes.Buffer)
@@ -124,7 +125,7 @@ func TestWALWrite(t *testing.T) {
msg := &BlockPartMessage{
Height: 1,
Round: 1,
Part: &tmtypes.Part{
Part: &metadata.Part{
Index: 1,
Bytes: make([]byte, 1),
Proof: merkle.Proof{
@@ -164,7 +165,7 @@ func TestWALSearchForEndHeight(t *testing.T) {
dec := NewWALDecoder(gr)
msg, err := dec.Decode()
assert.NoError(t, err, "expected to decode a message")
rs, ok := msg.Msg.(tmtypes.EventDataRoundState)
rs, ok := msg.Msg.(events.EventDataRoundState)
assert.True(t, ok, "expected message of type EventDataRoundState")
assert.Equal(t, rs.Height, h+1, "wrong height")
}
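The WAL tests above decode messages and type-assert the payload against the relocated event type. A sketch of that round-trip, assuming dec is a *WALDecoder positioned at a message boundary:

// Illustrative sketch only, not part of the diff.
msg, err := dec.Decode()
if err == nil {
	if rs, ok := msg.Msg.(events.EventDataRoundState); ok {
		fmt.Println("replayed round state at height", rs.Height)
	}
}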
@@ -3,8 +3,11 @@
package mocks

import (
block "github.com/tendermint/tendermint/pkg/block"

metadata "github.com/tendermint/tendermint/pkg/metadata"

mock "github.com/stretchr/testify/mock"
types "github.com/tendermint/tendermint/types"
)

// BlockStore is an autogenerated mock type for the BlockStore type
@@ -27,15 +30,15 @@ func (_m *BlockStore) Height() int64 {
}

// LoadBlockCommit provides a mock function with given fields: height
func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit {
func (_m *BlockStore) LoadBlockCommit(height int64) *metadata.Commit {
ret := _m.Called(height)

var r0 *types.Commit
if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok {
var r0 *metadata.Commit
if rf, ok := ret.Get(0).(func(int64) *metadata.Commit); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.Commit)
r0 = ret.Get(0).(*metadata.Commit)
}
}

@@ -43,15 +46,15 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit {
}

// LoadBlockMeta provides a mock function with given fields: height
func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta {
func (_m *BlockStore) LoadBlockMeta(height int64) *block.BlockMeta {
ret := _m.Called(height)

var r0 *types.BlockMeta
if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok {
var r0 *block.BlockMeta
if rf, ok := ret.Get(0).(func(int64) *block.BlockMeta); ok {
r0 = rf(height)
} else {
if ret.Get(0) != nil {
r0 = ret.Get(0).(*types.BlockMeta)
r0 = ret.Get(0).(*block.BlockMeta)
}
}
@@ -15,9 +15,10 @@ import (

clist "github.com/tendermint/tendermint/internal/libs/clist"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/consensus"
types "github.com/tendermint/tendermint/pkg/evidence"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

const (
@@ -185,7 +186,7 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error {
// the new state called.
//
// Votes are not verified.
func (evpool *Pool) ReportConflictingVotes(voteA, voteB *types.Vote) {
func (evpool *Pool) ReportConflictingVotes(voteA, voteB *consensus.Vote) {
evpool.mtx.Lock()
defer evpool.mtx.Unlock()
evpool.consensusBuffer = append(evpool.consensusBuffer, duplicateVoteSet{
@@ -581,8 +582,8 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) {
}

type duplicateVoteSet struct {
VoteA *types.Vote
VoteB *types.Vote
VoteA *consensus.Vote
VoteB *consensus.Vote
}

func bytesToEv(evBytes []byte) (types.Evidence, error) {

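Since ReportConflictingVotes only buffers the pair (the doc comment above notes that votes are not verified at this point; verification happens when the buffer is processed against the next state), a caller needs nothing beyond the two relocated vote pointers. A minimal sketch under that assumption:

	// Sketch: forward a conflicting vote pair to the evidence pool. The pool
	// buffers the pair unverified; it is checked on the next state update.
	func reportPair(evpool *Pool, voteA, voteB *consensus.Vote) {
		evpool.ReportConflictingVotes(voteA, voteB)
	}
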
@@ -15,11 +15,15 @@ import (
"github.com/tendermint/tendermint/internal/evidence/mocks"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
types "github.com/tendermint/tendermint/pkg/evidence"
"github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/pkg/metadata"
sm "github.com/tendermint/tendermint/state"
smmocks "github.com/tendermint/tendermint/state/mocks"
sf "github.com/tendermint/tendermint/state/test/factory"
"github.com/tendermint/tendermint/store"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/version"
)

@@ -41,7 +45,7 @@ func TestEvidencePoolBasic(t *testing.T) {
valSet, privVals := factory.RandValidatorSet(1, 10)

blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(
&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}},
&block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}},
)
stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil)
stateStore.On("Load").Return(createState(height+1, valSet), nil)
@@ -89,7 +93,7 @@ func TestEvidencePoolBasic(t *testing.T) {
// Tests inbound evidence for the right time and height
func TestAddExpiredEvidence(t *testing.T) {
var (
val = types.NewMockPV()
val = consensus.NewMockPV()
height = int64(30)
stateStore = initializeValidatorState(t, val, height)
evidenceDB = dbm.NewMemDB()
@@ -98,11 +102,11 @@ func TestAddExpiredEvidence(t *testing.T) {
expiredHeight = int64(2)
)

blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta {
blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *block.BlockMeta {
if h == height || h == expiredHeight {
return &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}
return &block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}}
}
return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}}
return &block.BlockMeta{Header: metadata.Header{Time: expiredEvidenceTime}}
})

pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore)
@@ -141,7 +145,7 @@ func TestReportConflictingVotes(t *testing.T) {
var height int64 = 10

pool, pv := defaultTestPool(t, height)
val := types.NewValidator(pv.PrivKey.PubKey(), 10)
val := consensus.NewValidator(pv.PrivKey.PubKey(), 10)
ev := types.NewMockDuplicateVoteEvidenceWithValidator(height+1, defaultEvidenceTime, pv, evidenceChainID)

pool.ReportConflictingVotes(ev.VoteA, ev.VoteB)
@@ -161,7 +165,7 @@ func TestReportConflictingVotes(t *testing.T) {
state := pool.State()
state.LastBlockHeight++
state.LastBlockTime = ev.Time()
state.LastValidators = types.NewValidatorSet([]*types.Validator{val})
state.LastValidators = consensus.NewValidatorSet([]*consensus.Validator{val})
pool.Update(state, []types.Evidence{})

// should be able to retrieve evidence from pool
@@ -202,7 +206,7 @@ func TestEvidencePoolUpdate(t *testing.T) {
evidenceChainID,
)
lastCommit := makeCommit(height, val.PrivKey.PubKey().Address())
block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev})
block := block.MakeBlock(height+1, []mempool.Tx{}, lastCommit, []types.Evidence{ev})

// update state (partially)
state.LastBlockHeight = height + 1
@@ -278,7 +282,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
state := sm.State{
LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour),
LastBlockHeight: 110,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}

stateStore := &smmocks.Store{}
@@ -287,8 +291,8 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
stateStore.On("Load").Return(state, nil)

blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockMeta", height).Return(&block.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)

@@ -327,7 +331,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) {
// pending evidence and continue to gossip it
func TestRecoverPendingEvidence(t *testing.T) {
height := int64(10)
val := types.NewMockPV()
val := consensus.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
evidenceDB := dbm.NewMemDB()
stateStore := initializeValidatorState(t, val, height)
@@ -362,12 +366,12 @@ func TestRecoverPendingEvidence(t *testing.T) {
newStateStore.On("Load").Return(sm.State{
LastBlockTime: defaultEvidenceTime.Add(25 * time.Minute),
LastBlockHeight: height + 15,
ConsensusParams: types.ConsensusParams{
Block: types.BlockParams{
ConsensusParams: consensus.ConsensusParams{
Block: consensus.BlockParams{
MaxBytes: 22020096,
MaxGas: -1,
},
Evidence: types.EvidenceParams{
Evidence: consensus.EvidenceParams{
MaxAgeNumBlocks: 20,
MaxAgeDuration: 20 * time.Minute,
MaxBytes: defaultEvidenceMaxBytes,
@@ -385,7 +389,7 @@ func TestRecoverPendingEvidence(t *testing.T) {
require.Equal(t, goodEvidence, next.Value.(types.Evidence))
}

func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, height int64) sm.Store {
func initializeStateFromValidatorSet(t *testing.T, valSet *consensus.ValidatorSet, height int64) sm.Store {
stateDB := dbm.NewMemDB()
stateStore := sm.NewStore(stateDB)
state := sm.State{
@@ -397,12 +401,12 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h
NextValidators: valSet.CopyIncrementProposerPriority(1),
LastValidators: valSet,
LastHeightValidatorsChanged: 1,
ConsensusParams: types.ConsensusParams{
Block: types.BlockParams{
ConsensusParams: consensus.ConsensusParams{
Block: consensus.BlockParams{
MaxBytes: 22020096,
MaxGas: -1,
},
Evidence: types.EvidenceParams{
Evidence: consensus.EvidenceParams{
MaxAgeNumBlocks: 20,
MaxAgeDuration: 20 * time.Minute,
MaxBytes: 1000,
@@ -419,13 +423,13 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h
return stateStore
}

func initializeValidatorState(t *testing.T, privVal types.PrivValidator, height int64) sm.Store {
func initializeValidatorState(t *testing.T, privVal consensus.PrivValidator, height int64) sm.Store {
pubKey, _ := privVal.GetPubKey(context.Background())
validator := &types.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey}
validator := &consensus.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey}

// create validator set and state
valSet := &types.ValidatorSet{
Validators: []*types.Validator{validator},
valSet := &consensus.ValidatorSet{
Validators: []*consensus.Validator{validator},
Proposer: validator,
}

@@ -452,19 +456,19 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.Bloc
return blockStore
}

func makeCommit(height int64, valAddr []byte) *types.Commit {
commitSigs := []types.CommitSig{{
BlockIDFlag: types.BlockIDFlagCommit,
func makeCommit(height int64, valAddr []byte) *metadata.Commit {
commitSigs := []metadata.CommitSig{{
BlockIDFlag: metadata.BlockIDFlagCommit,
ValidatorAddress: valAddr,
Timestamp: defaultEvidenceTime,
Signature: []byte("Signature"),
}}

return types.NewCommit(height, 0, types.BlockID{}, commitSigs)
return metadata.NewCommit(height, 0, metadata.BlockID{}, commitSigs)
}

func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV) {
val := types.NewMockPV()
func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, consensus.MockPV) {
val := consensus.NewMockPV()
valAddress := val.PrivKey.PubKey().Address()
evidenceDB := dbm.NewMemDB()
stateStore := initializeValidatorState(t, val, height)
@@ -477,12 +481,12 @@ func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV)
return pool, val
}

func createState(height int64, valSet *types.ValidatorSet) sm.State {
func createState(height int64, valSet *consensus.ValidatorSet) sm.State {
return sm.State{
ChainID: evidenceChainID,
LastBlockHeight: height,
LastBlockTime: defaultEvidenceTime,
Validators: valSet,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}
}

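The makeCommit helper above doubles as a recipe for building placeholder commits anywhere the relocated types are needed. A hedged sketch using only the pkg/metadata constructors visible in this diff:

	// Sketch: a single-signature placeholder commit for height h. The dummy
	// signature is fine here because these pool tests never verify it.
	func placeholderCommit(h int64, valAddr []byte, ts time.Time) *metadata.Commit {
		sigs := []metadata.CommitSig{{
			BlockIDFlag:      metadata.BlockIDFlagCommit,
			ValidatorAddress: valAddr,
			Timestamp:        ts,
			Signature:        []byte("Signature"),
		}}
		return metadata.NewCommit(h, 0, metadata.BlockID{}, sigs)
	}
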
@@ -11,8 +11,9 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
types "github.com/tendermint/tendermint/pkg/evidence"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
"github.com/tendermint/tendermint/types"
)

var (
@@ -63,7 +64,7 @@ type Reactor struct {
peerWG sync.WaitGroup

mtx tmsync.Mutex
peerRoutines map[types.NodeID]*tmsync.Closer
peerRoutines map[p2ptypes.NodeID]*tmsync.Closer
}

// NewReactor returns a reference to a new evidence reactor, which implements the
@@ -80,7 +81,7 @@ func NewReactor(
evidenceCh: evidenceCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
peerRoutines: make(map[types.NodeID]*tmsync.Closer),
peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer),
}

r.BaseService = *service.NewBaseService(logger, "Evidence", r)
@@ -291,7 +292,7 @@ func (r *Reactor) processPeerUpdates() {
// that the peer has already received or may not be ready for.
//
// REF: https://github.com/tendermint/tendermint/issues/4727
func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastEvidenceLoop(peerID p2ptypes.NodeID, closer *tmsync.Closer) {
var next *clist.CElement

defer func() {

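The reactor keeps one broadcast goroutine per peer, keyed by the relocated p2ptypes.NodeID. A sketch of the registration half of that discipline; it assumes a tmsync.NewCloser constructor in internal/libs/sync, which this diff does not itself show:

	// Sketch: start a per-peer broadcast routine exactly once, guarded by the
	// reactor mutex, and remember its Closer so peer-down events can stop it.
	func (r *Reactor) startPeerRoutine(peerID p2ptypes.NodeID) {
		r.mtx.Lock()
		defer r.mtx.Unlock()
		if _, ok := r.peerRoutines[peerID]; ok {
			return // routine already running for this peer
		}
		closer := tmsync.NewCloser()
		r.peerRoutines[peerID] = closer
		r.peerWG.Add(1)
		go r.broadcastEvidenceLoop(peerID, closer)
	}
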
@@ -21,9 +21,13 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/internal/p2p/p2ptest"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
types "github.com/tendermint/tendermint/pkg/evidence"
"github.com/tendermint/tendermint/pkg/metadata"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
"github.com/tendermint/tendermint/types"
)

var (
@@ -35,11 +39,11 @@ var (
type reactorTestSuite struct {
network *p2ptest.Network
logger log.Logger
reactors map[types.NodeID]*evidence.Reactor
pools map[types.NodeID]*evidence.Pool
evidenceChannels map[types.NodeID]*p2p.Channel
peerUpdates map[types.NodeID]*p2p.PeerUpdates
peerChans map[types.NodeID]chan p2p.PeerUpdate
reactors map[p2ptypes.NodeID]*evidence.Reactor
pools map[p2ptypes.NodeID]*evidence.Pool
evidenceChannels map[p2ptypes.NodeID]*p2p.Channel
peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates
peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate
nodes []*p2ptest.Node
numStateStores int
}
@@ -56,10 +60,10 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
numStateStores: numStateStores,
logger: log.TestingLogger().With("testCase", t.Name()),
network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}),
reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores),
pools: make(map[types.NodeID]*evidence.Pool, numStateStores),
peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores),
peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores),
reactors: make(map[p2ptypes.NodeID]*evidence.Reactor, numStateStores),
pools: make(map[p2ptypes.NodeID]*evidence.Pool, numStateStores),
peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numStateStores),
peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numStateStores),
}

chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)}
@@ -76,9 +80,9 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite {
evidenceDB := dbm.NewMemDB()
blockStore := &mocks.BlockStore{}
state, _ := stateStores[idx].Load()
blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta {
blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *block.BlockMeta {
if h <= state.LastBlockHeight {
return &types.BlockMeta{Header: types.Header{Time: evidenceTime}}
return &block.BlockMeta{Header: metadata.Header{Time: evidenceTime}}
}
return nil
})
@@ -124,7 +128,7 @@ func (rts *reactorTestSuite) start(t *testing.T) {
"network does not have expected number of nodes")
}

func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...types.NodeID) {
func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2ptypes.NodeID) {
t.Helper()

fn := func(pool *evidence.Pool) {
@@ -188,7 +192,7 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence
}

wg.Add(1)
go func(id types.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
go func(id p2ptypes.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id)
}
wg.Wait()
}
@@ -211,7 +215,7 @@ func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) {
func createEvidenceList(
t *testing.T,
pool *evidence.Pool,
val types.PrivValidator,
val consensus.PrivValidator,
numEvidence int,
) types.EvidenceList {
t.Helper()
@@ -236,7 +240,7 @@ func createEvidenceList(
}

func TestReactorMultiDisconnect(t *testing.T) {
val := types.NewMockPV()
val := consensus.NewMockPV()
height := int64(numEvidence) + 10

stateDB1 := initializeValidatorState(t, val, height)
@@ -275,7 +279,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {

// create a stateDB for all test suites (nodes)
stateDBs := make([]sm.Store, numPeers)
val := types.NewMockPV()
val := consensus.NewMockPV()

// We need all validators saved for heights at least as high as we have
// evidence for.
@@ -293,7 +297,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
// primary. As a result, the primary will gossip all evidence to each secondary.
primary := rts.network.RandomNode()
secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1)
secondaryIDs := make([]types.NodeID, 0, cap(secondaries))
secondaryIDs := make([]p2ptypes.NodeID, 0, cap(secondaries))
for id := range rts.network.Nodes {
if id == primary.NodeID {
continue
@@ -329,7 +333,7 @@ func TestReactorBroadcastEvidence(t *testing.T) {
// connected to one another but are at different heights. Reactor 1 which is
// ahead receives a list of evidence.
func TestReactorBroadcastEvidence_Lagging(t *testing.T) {
val := types.NewMockPV()
val := consensus.NewMockPV()
height1 := int64(numEvidence) + 10
height2 := int64(numEvidence) / 2

@@ -365,7 +369,7 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) {
}

func TestReactorBroadcastEvidence_Pending(t *testing.T) {
val := types.NewMockPV()
val := consensus.NewMockPV()
height := int64(10)

stateDB1 := initializeValidatorState(t, val, height)
@@ -405,7 +409,7 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) {
}

func TestReactorBroadcastEvidence_Committed(t *testing.T) {
val := types.NewMockPV()
val := consensus.NewMockPV()
height := int64(10)

stateDB1 := initializeValidatorState(t, val, height)
@@ -465,7 +469,7 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) {

// create a stateDB for all test suites (nodes)
stateDBs := make([]sm.Store, numPeers)
val := types.NewMockPV()
val := consensus.NewMockPV()

// We need all validators saved for heights at least as high as we have
// evidence for.
@@ -506,18 +510,18 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) {

// nolint:lll
func TestEvidenceListSerialization(t *testing.T) {
exampleVote := func(msgType byte) *types.Vote {
var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z")
exampleVote := func(msgType byte) *consensus.Vote {
var stamp, err = time.Parse(metadata.TimeFormat, "2017-12-25T03:00:01.234Z")
require.NoError(t, err)

return &types.Vote{
return &consensus.Vote{
Type: tmproto.SignedMsgType(msgType),
Height: 3,
Round: 2,
Timestamp: stamp,
BlockID: types.BlockID{
BlockID: metadata.BlockID{
Hash: tmhash.Sum([]byte("blockID_hash")),
PartSetHeader: types.PartSetHeader{
PartSetHeader: metadata.PartSetHeader{
Total: 1000000,
Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")),
},
@@ -527,12 +531,12 @@ func TestEvidenceListSerialization(t *testing.T) {
}
}

val := &types.Validator{
val := &consensus.Validator{
Address: crypto.AddressHash([]byte("validator_address")),
VotingPower: 10,
}

valSet := types.NewValidatorSet([]*types.Validator{val})
valSet := consensus.NewValidatorSet([]*consensus.Validator{val})

dupl := types.NewDuplicateVoteEvidence(
exampleVote(1),

@@ -1,13 +1,14 @@
package evidence

import (
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/metadata"
)

//go:generate ../../scripts/mockery_generate.sh BlockStore

type BlockStore interface {
LoadBlockMeta(height int64) *types.BlockMeta
LoadBlockCommit(height int64) *types.Commit
LoadBlockMeta(height int64) *block.BlockMeta
LoadBlockCommit(height int64) *metadata.Commit
Height() int64
}

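Anything that can serve the relocated block.BlockMeta and metadata.Commit satisfies this interface; the regenerated mock earlier in the commit is one implementation. A trivial in-memory sketch for tests:

	// Sketch: a map-backed BlockStore. Unknown heights return nil, matching
	// the pointer-return contract of the interface above.
	type memBlockStore struct {
		metas   map[int64]*block.BlockMeta
		commits map[int64]*metadata.Commit
		height  int64
	}

	func (s *memBlockStore) LoadBlockMeta(h int64) *block.BlockMeta   { return s.metas[h] }
	func (s *memBlockStore) LoadBlockCommit(h int64) *metadata.Commit { return s.commits[h] }
	func (s *memBlockStore) Height() int64                            { return s.height }
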
@@ -7,7 +7,9 @@ import (
"time"

"github.com/tendermint/tendermint/light"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/consensus"
types "github.com/tendermint/tendermint/pkg/evidence"
"github.com/tendermint/tendermint/pkg/metadata"
)

// verify verifies the evidence fully by checking:
@@ -156,8 +158,8 @@ func (evpool *Pool) verify(evidence types.Evidence) error {
//
// CONTRACT: must run ValidateBasic() on the evidence before verifying
// must check that the evidence has not expired (i.e. is outside the maximum age threshold)
func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader,
commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error {
func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *metadata.SignedHeader,
commonVals *consensus.ValidatorSet, now time.Time, trustPeriod time.Duration) error {
// In the case of lunatic attack there will be a different commonHeader height. Therefore the node perform a single
// verification jump between the common header and the conflicting one
if commonHeader.Height != e.ConflictingBlock.Height {
@@ -199,7 +201,7 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t
// - the height, round, type and validator address of the votes must be the same
// - the block ID's must be different
// - The signatures must both be valid
func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error {
func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *consensus.ValidatorSet) error {
_, val := valSet.GetByAddress(e.VoteA.ValidatorAddress)
if val == nil {
return fmt.Errorf("address %X was not a validator at height %d", e.VoteA.ValidatorAddress, e.Height())
@@ -241,17 +243,17 @@ func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet
va := e.VoteA.ToProto()
vb := e.VoteB.ToProto()
// Signatures must be valid
if !pubKey.VerifySignature(types.VoteSignBytes(chainID, va), e.VoteA.Signature) {
return fmt.Errorf("verifying VoteA: %w", types.ErrVoteInvalidSignature)
if !pubKey.VerifySignature(consensus.VoteSignBytes(chainID, va), e.VoteA.Signature) {
return fmt.Errorf("verifying VoteA: %w", consensus.ErrVoteInvalidSignature)
}
if !pubKey.VerifySignature(types.VoteSignBytes(chainID, vb), e.VoteB.Signature) {
return fmt.Errorf("verifying VoteB: %w", types.ErrVoteInvalidSignature)
if !pubKey.VerifySignature(consensus.VoteSignBytes(chainID, vb), e.VoteB.Signature) {
return fmt.Errorf("verifying VoteB: %w", consensus.ErrVoteInvalidSignature)
}

return nil
}

func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, error) {
func getSignedHeader(blockStore BlockStore, height int64) (*metadata.SignedHeader, error) {
blockMeta := blockStore.LoadBlockMeta(height)
if blockMeta == nil {
return nil, fmt.Errorf("don't have header at height #%d", height)
@@ -260,7 +262,7 @@ func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader,
if commit == nil {
return nil, fmt.Errorf("don't have commit at height #%d", height)
}
return &types.SignedHeader{
Header: &blockMeta.Header,
Commit: commit,
}, nil
return &metadata.SignedHeader{
Header: &blockMeta.Header,
Commit: commit,
}, nil

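The verification entry points keep their shapes; only the sign-bytes and validator-set helpers move to pkg/consensus. A sketch of a caller, assuming the VerifyDuplicateVote signature shown above and the file's existing fmt import:

	// Sketch: validate duplicate-vote evidence against the validator set that
	// was active at the evidence height, adding context to any failure.
	func checkDuplicateVote(ev *types.DuplicateVoteEvidence, chainID string, valSet *consensus.ValidatorSet) error {
		if err := VerifyDuplicateVote(ev, chainID, valSet); err != nil {
			return fmt.Errorf("invalid duplicate vote evidence at height %d: %w", ev.Height(), err)
		}
		return nil
	}
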
@@ -17,10 +17,14 @@ import (
"github.com/tendermint/tendermint/internal/evidence/mocks"
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/pkg/block"
"github.com/tendermint/tendermint/pkg/consensus"
types "github.com/tendermint/tendermint/pkg/evidence"
"github.com/tendermint/tendermint/pkg/light"
"github.com/tendermint/tendermint/pkg/metadata"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
smmocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/types"
)

const (
@@ -81,14 +85,14 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) {
state := sm.State{
LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour),
LastBlockHeight: height + 1,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}
stateStore := &smmocks.Store{}
stateStore.On("LoadValidators", commonHeight).Return(common.ValidatorSet, nil)
stateStore.On("Load").Return(state, nil)
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockMeta", height).Return(&block.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", height).Return(trusted.Commit)
pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
@@ -150,7 +154,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
state := sm.State{
LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour),
LastBlockHeight: nodeHeight,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}

// modify trusted light block so that it is of a height less than the conflicting one
@@ -161,8 +165,8 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
stateStore.On("LoadValidators", commonHeight).Return(common.ValidatorSet, nil)
stateStore.On("Load").Return(state, nil)
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockMeta", nodeHeight).Return(&types.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header})
blockStore.On("LoadBlockMeta", nodeHeight).Return(&block.BlockMeta{Header: *trusted.Header})
blockStore.On("LoadBlockMeta", attackHeight).Return(nil)
blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit)
@@ -177,8 +181,8 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
oldBlockStore := &mocks.BlockStore{}
oldHeader := trusted.Header
oldHeader.Time = defaultEvidenceTime
oldBlockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header})
oldBlockStore.On("LoadBlockMeta", nodeHeight).Return(&types.BlockMeta{Header: *oldHeader})
oldBlockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header})
oldBlockStore.On("LoadBlockMeta", nodeHeight).Return(&block.BlockMeta{Header: *oldHeader})
oldBlockStore.On("LoadBlockMeta", attackHeight).Return(nil)
oldBlockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit)
oldBlockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit)
@@ -193,7 +197,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) {
func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10)

conflictingHeader, err := factory.MakeHeader(&types.Header{
conflictingHeader, err := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: 10,
Time: defaultEvidenceTime,
@@ -201,7 +205,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
})
require.NoError(t, err)

trustedHeader, _ := factory.MakeHeader(&types.Header{
trustedHeader, _ := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: 10,
Time: defaultEvidenceTime,
@@ -215,12 +219,12 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
// we are simulating a duplicate vote attack where all the validators in the conflictingVals set
// except the last validator vote twice
blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash())
voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
voteSet := consensus.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
commit, err := factory.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime)
require.NoError(t, err)
ev := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: &types.SignedHeader{
ConflictingBlock: &light.LightBlock{
SignedHeader: &metadata.SignedHeader{
Header: conflictingHeader,
Commit: commit,
},
@@ -233,11 +237,11 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
}

trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash"))
trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
trustedVoteSet := consensus.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals)
trustedCommit, err := factory.MakeCommit(trustedBlockID, 10, 1,
trustedVoteSet, conflictingPrivVals, defaultEvidenceTime)
require.NoError(t, err)
trustedSignedHeader := &types.SignedHeader{
trustedSignedHeader := &metadata.SignedHeader{
Header: trustedHeader,
Commit: trustedCommit,
}
@@ -264,13 +268,13 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) {
state := sm.State{
LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute),
LastBlockHeight: 11,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}
stateStore := &smmocks.Store{}
stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil)
stateStore.On("Load").Return(state, nil)
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)

pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
@@ -288,7 +292,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
var height int64 = 10
conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10)

conflictingHeader, err := factory.MakeHeader(&types.Header{
conflictingHeader, err := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: height,
Time: defaultEvidenceTime,
@@ -296,7 +300,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
})
require.NoError(t, err)

trustedHeader, _ := factory.MakeHeader(&types.Header{
trustedHeader, _ := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: height,
Time: defaultEvidenceTime,
@@ -310,12 +314,12 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
// we are simulating an amnesia attack where all the validators in the conflictingVals set
// except the last validator vote twice. However this time the commits are of different rounds.
blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash"))
voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals)
voteSet := consensus.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals)
commit, err := factory.MakeCommit(blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime)
require.NoError(t, err)
ev := &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: &types.SignedHeader{
ConflictingBlock: &light.LightBlock{
SignedHeader: &metadata.SignedHeader{
Header: conflictingHeader,
Commit: commit,
},
@@ -328,11 +332,11 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
}

trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash"))
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
trustedVoteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1,
trustedVoteSet, conflictingPrivVals, defaultEvidenceTime)
require.NoError(t, err)
trustedSignedHeader := &types.SignedHeader{
trustedSignedHeader := &metadata.SignedHeader{
Header: trustedHeader,
Commit: trustedCommit,
}
@@ -350,13 +354,13 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
state := sm.State{
LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute),
LastBlockHeight: 11,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}
stateStore := &smmocks.Store{}
stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil)
stateStore.On("Load").Return(state, nil)
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: *trustedHeader})
blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit)

pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
@@ -371,15 +375,15 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) {
}

type voteData struct {
vote1 *types.Vote
vote2 *types.Vote
vote1 *consensus.Vote
vote2 *consensus.Vote
valid bool
}

func TestVerifyDuplicateVoteEvidence(t *testing.T) {
val := types.NewMockPV()
val2 := types.NewMockPV()
valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(1)})
val := consensus.NewMockPV()
val2 := consensus.NewMockPV()
valSet := consensus.NewValidatorSet([]*consensus.Validator{val.ExtractIntoValidator(1)})

blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash"))
blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash"))
@@ -443,13 +447,13 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) {
ChainID: chainID,
LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute),
LastBlockHeight: 11,
ConsensusParams: *types.DefaultConsensusParams(),
ConsensusParams: *consensus.DefaultConsensusParams(),
}
stateStore := &smmocks.Store{}
stateStore.On("LoadValidators", int64(10)).Return(valSet, nil)
stateStore.On("Load").Return(state, nil)
blockStore := &mocks.BlockStore{}
blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}})
blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}})

pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore)
require.NoError(t, err)
@@ -474,7 +478,7 @@ func makeLunaticEvidence(
height, commonHeight int64,
totalVals, byzVals, phantomVals int,
commonTime, attackTime time.Time,
) (ev *types.LightClientAttackEvidence, trusted *types.LightBlock, common *types.LightBlock) {
) (ev *types.LightClientAttackEvidence, trusted *light.LightBlock, common *light.LightBlock) {
commonValSet, commonPrivVals := factory.RandValidatorSet(totalVals, defaultVotingPower)

require.Greater(t, totalVals, byzVals)
@@ -490,20 +494,20 @@ func makeLunaticEvidence(

conflictingPrivVals = orderPrivValsByValSet(t, conflictingVals, conflictingPrivVals)

commonHeader, err := factory.MakeHeader(&types.Header{
commonHeader, err := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: commonHeight,
Time: commonTime,
})
require.NoError(t, err)
trustedHeader, err := factory.MakeHeader(&types.Header{
trustedHeader, err := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: height,
Time: defaultEvidenceTime,
})
require.NoError(t, err)

conflictingHeader, err := factory.MakeHeader(&types.Header{
conflictingHeader, err := factory.MakeHeader(&metadata.Header{
ChainID: evidenceChainID,
Height: height,
Time: attackTime,
@@ -512,12 +516,12 @@ func makeLunaticEvidence(
require.NoError(t, err)

blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash())
voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
voteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals)
commit, err := factory.MakeCommit(blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime)
require.NoError(t, err)
ev = &types.LightClientAttackEvidence{
ConflictingBlock: &types.LightBlock{
SignedHeader: &types.SignedHeader{
ConflictingBlock: &light.LightBlock{
SignedHeader: &metadata.SignedHeader{
Header: conflictingHeader,
Commit: commit,
},
@@ -529,21 +533,21 @@ func makeLunaticEvidence(
Timestamp: commonTime,
}

common = &types.LightBlock{
SignedHeader: &types.SignedHeader{
common = &light.LightBlock{
SignedHeader: &metadata.SignedHeader{
Header: commonHeader,
// we can leave this empty because we shouldn't be checking this
Commit: &types.Commit{},
Commit: &metadata.Commit{},
},
ValidatorSet: commonValSet,
}
trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash())
trustedVals, privVals := factory.RandValidatorSet(totalVals, defaultVotingPower)
trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals)
trustedVoteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals)
trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime)
require.NoError(t, err)
trusted = &types.LightBlock{
SignedHeader: &types.SignedHeader{
trusted = &light.LightBlock{
SignedHeader: &metadata.SignedHeader{
Header: trustedHeader,
Commit: trustedCommit,
},
@@ -553,11 +557,11 @@ func makeLunaticEvidence(
}

func makeVote(
t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height int64,
round int32, step int, blockID types.BlockID, time time.Time) *types.Vote {
t *testing.T, val consensus.PrivValidator, chainID string, valIndex int32, height int64,
round int32, step int, blockID metadata.BlockID, time time.Time) *consensus.Vote {
pubKey, err := val.GetPubKey(context.Background())
require.NoError(t, err)
v := &types.Vote{
v := &consensus.Vote{
ValidatorAddress: pubKey.Address(),
ValidatorIndex: valIndex,
Height: height,
@@ -576,16 +580,16 @@ func makeVote(
return v
}

func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID {
func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) metadata.BlockID {
var (
h = make([]byte, tmhash.Size)
psH = make([]byte, tmhash.Size)
)
copy(h, hash)
copy(psH, partSetHash)
return types.BlockID{
return metadata.BlockID{
Hash: h,
PartSetHeader: types.PartSetHeader{
PartSetHeader: metadata.PartSetHeader{
Total: partSetSize,
Hash: psH,
},
@@ -593,8 +597,8 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc
}

func orderPrivValsByValSet(
t *testing.T, vals *types.ValidatorSet, privVals []types.PrivValidator) []types.PrivValidator {
output := make([]types.PrivValidator, len(privVals))
t *testing.T, vals *consensus.ValidatorSet, privVals []consensus.PrivValidator) []consensus.PrivValidator {
output := make([]consensus.PrivValidator, len(privVals))
for idx, v := range vals.Validators {
for _, p := range privVals {
pubKey, err := p.GetPubKey(context.Background())

@@ -4,7 +4,7 @@ import (
"container/list"

tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/types"
types "github.com/tendermint/tendermint/pkg/mempool"
)

// TxCache defines an interface for raw transaction caching in a mempool.

@@ -4,21 +4,21 @@ import (
"fmt"

tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/p2p"
)

// nolint: golint
// TODO: Rename type.
type MempoolIDs struct {
mtx tmsync.RWMutex
peerMap map[types.NodeID]uint16
peerMap map[p2p.NodeID]uint16
nextID uint16 // assumes that a node will never have over 65536 active peers
activeIDs map[uint16]struct{} // used to check if a given peerID key is used
}

func NewMempoolIDs() *MempoolIDs {
return &MempoolIDs{
peerMap: make(map[types.NodeID]uint16),
peerMap: make(map[p2p.NodeID]uint16),

// reserve UnknownPeerID for mempoolReactor.BroadcastTx
activeIDs: map[uint16]struct{}{UnknownPeerID: {}},
@@ -28,7 +28,7 @@ func NewMempoolIDs() *MempoolIDs {

// ReserveForPeer searches for the next unused ID and assigns it to the provided
// peer.
func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) {
func (ids *MempoolIDs) ReserveForPeer(peerID p2p.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()

@@ -38,7 +38,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) {
}

// Reclaim returns the ID reserved for the peer back to unused pool.
func (ids *MempoolIDs) Reclaim(peerID types.NodeID) {
func (ids *MempoolIDs) Reclaim(peerID p2p.NodeID) {
ids.mtx.Lock()
defer ids.mtx.Unlock()

@@ -50,7 +50,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) {
}

// GetForPeer returns an ID reserved for the peer.
func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 {
func (ids *MempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 {
ids.mtx.RLock()
defer ids.mtx.RUnlock()

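MempoolIDs maps full node IDs to compact uint16 handles; only the key type changes in this commit. A usage sketch built from the methods in the hunks above:

	// Sketch: reserve, look up, and release a peer's compact mempool ID.
	peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
	if err != nil {
		panic(err) // illustrative; real callers propagate the error
	}
	ids := NewMempoolIDs()
	ids.ReserveForPeer(peerID)        // assigns the next unused uint16
	shortID := ids.GetForPeer(peerID) // compact ID for wire metadata
	_ = shortID
	ids.Reclaim(peerID) // return the ID to the pool on disconnect
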
@@ -4,13 +4,13 @@ import (
"testing"

"github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/p2p"
)

func TestMempoolIDsBasic(t *testing.T) {
ids := NewMempoolIDs()

peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899")
require.NoError(t, err)

ids.ReserveForPeer(peerID)

@@ -5,9 +5,10 @@ import (
"fmt"
"math"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
types "github.com/tendermint/tendermint/pkg/mempool"
)

const (
@@ -108,7 +109,7 @@ type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error
// to the expected maxBytes.
func PreCheckMaxBytes(maxBytes int64) PreCheckFunc {
return func(tx types.Tx) error {
txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx})
txSize := block.ComputeProtoSizeForTxs([]types.Tx{tx})

if txSize > maxBytes {
return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes)

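PreCheckMaxBytes is a closure factory, so wiring it up is one call; sizing now goes through block.ComputeProtoSizeForTxs. A sketch in this file's namespace (where types aliases pkg/mempool), with the 22020096 limit borrowed from the BlockParams.MaxBytes value used elsewhere in this commit:

	// Sketch: reject any tx whose proto-encoded size exceeds the block limit.
	preCheck := PreCheckMaxBytes(22020096)
	if err := preCheck(types.Tx([]byte("raw tx bytes"))); err != nil {
		fmt.Println(err) // tx too big for a block; do not admit it
	}
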
@@ -3,10 +3,10 @@ package mock
import (
"context"

abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/libs/clist"
mempl "github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/abci"
types "github.com/tendermint/tendermint/pkg/mempool"
)

// Mempool is an empty implementation of a Mempool, useful for testing.

@@ -3,20 +3,21 @@ package mempool
import (
"crypto/sha256"

"github.com/tendermint/tendermint/types"
"github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/pkg/p2p"
)

// TxKeySize defines the size of the transaction's key used for indexing.
const TxKeySize = sha256.Size

// TxKey is the fixed length array key used as an index.
func TxKey(tx types.Tx) [TxKeySize]byte {
func TxKey(tx mempool.Tx) [TxKeySize]byte {
return sha256.Sum256(tx)
}

// TxHashFromBytes returns the hash of a transaction from raw bytes.
func TxHashFromBytes(tx []byte) []byte {
return types.Tx(tx).Hash()
return mempool.Tx(tx).Hash()
}

// TxInfo are parameters that get passed when attempting to add a tx to the
@@ -28,5 +29,5 @@ type TxInfo struct {
SenderID uint16

// SenderNodeID is the actual types.NodeID of the sender.
SenderNodeID types.NodeID
SenderNodeID p2p.NodeID
}

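TxKey gives a fixed-size, hashable index for the relocated mempool.Tx alias, which is what makes map-based caches cheap. A sketch:

	// Sketch: de-duplicate transactions by their sha256 key.
	tx := mempool.Tx([]byte("some raw transaction"))
	seen := map[[TxKeySize]byte]struct{}{}
	seen[TxKey(tx)] = struct{}{}
	if _, ok := seen[TxKey(tx)]; ok {
		// duplicate: this tx has already been admitted
	}
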
@@ -8,10 +8,10 @@ import (
"github.com/stretchr/testify/require"

"github.com/tendermint/tendermint/abci/example/kvstore"
abci "github.com/tendermint/tendermint/abci/types"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/pkg/abci"
types "github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)

func TestCacheAfterUpdate(t *testing.T) {

@@ -7,16 +7,18 @@ import (
"sync"
"sync/atomic"

abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/libs/clist"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/libs/log"
tmmath "github.com/tendermint/tendermint/libs/math"
"github.com/tendermint/tendermint/pkg/abci"
"github.com/tendermint/tendermint/pkg/block"
pubmempool "github.com/tendermint/tendermint/pkg/mempool"
types "github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/pkg/p2p"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)

// CListMempool is an ordered in-memory pool for transactions before they are
@@ -303,7 +305,7 @@ func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) {
func (mem *CListMempool) reqResCb(
tx []byte,
peerID uint16,
peerP2PID types.NodeID,
peerP2PID p2p.NodeID,
externalCb func(*abci.Response),
) func(res *abci.Response) {
return func(res *abci.Response) {
@@ -382,7 +384,7 @@ func (mem *CListMempool) isFull(txSize int) error {
func (mem *CListMempool) resCbFirstTime(
tx []byte,
peerID uint16,
peerP2PID types.NodeID,
peerP2PID p2p.NodeID,
res *abci.Response,
) {
switch r := res.Value.(type) {
@@ -522,7 +524,7 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {

txs = append(txs, memTx.tx)

dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})
dataSize := block.ComputeProtoSizeForTxs([]types.Tx{memTx.tx})

// Check total size requirement
if maxBytes > -1 && runningSize+dataSize > maxBytes {

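The reap loop's size accounting is the part worth internalizing: each candidate's proto size is computed via block.ComputeProtoSizeForTxs, and the loop stops before the running total would cross maxBytes (-1 disables the check). A standalone sketch of that rule, in this file's namespace where types aliases pkg/mempool:

	// Sketch: select transactions until the proto-encoded total would
	// exceed maxBytes; a maxBytes of -1 means unlimited.
	func reapBySize(candidates []types.Tx, maxBytes int64) []types.Tx {
		var out []types.Tx
		var runningSize int64
		for _, tx := range candidates {
			dataSize := block.ComputeProtoSizeForTxs([]types.Tx{tx})
			if maxBytes > -1 && runningSize+dataSize > maxBytes {
				break
			}
			runningSize += dataSize
			out = append(out, tx)
		}
		return out
	}
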
@@ -17,15 +17,15 @@ import (

"github.com/tendermint/tendermint/abci/example/kvstore"
abciserver "github.com/tendermint/tendermint/abci/server"
abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
"github.com/tendermint/tendermint/internal/mempool"
"github.com/tendermint/tendermint/libs/log"
tmrand "github.com/tendermint/tendermint/libs/rand"
"github.com/tendermint/tendermint/libs/service"
"github.com/tendermint/tendermint/pkg/abci"
pubmempool "github.com/tendermint/tendermint/pkg/mempool"
types "github.com/tendermint/tendermint/pkg/mempool"
"github.com/tendermint/tendermint/proxy"
"github.com/tendermint/tendermint/types"
)

// A cleanupFunc cleans up any config / test files created for a particular

@@ -15,8 +15,9 @@ import (
"github.com/tendermint/tendermint/internal/p2p"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
types "github.com/tendermint/tendermint/pkg/mempool"
p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
"github.com/tendermint/tendermint/types"
)

var (
@@ -28,7 +29,7 @@ var (
// peer information. This should eventually be replaced with a message-oriented
// approach utilizing the p2p stack.
type PeerManager interface {
GetHeight(types.NodeID) int64
GetHeight(p2ptypes.NodeID) int64
}

// Reactor implements a service that contains mempool of txs that are broadcasted
@@ -55,7 +56,7 @@ type Reactor struct {
peerWG sync.WaitGroup

mtx tmsync.Mutex
peerRoutines map[types.NodeID]*tmsync.Closer
peerRoutines map[p2ptypes.NodeID]*tmsync.Closer
}

// NewReactor returns a reference to a new reactor.
@@ -76,7 +77,7 @@ func NewReactor(
mempoolCh: mempoolCh,
peerUpdates: peerUpdates,
closeCh: make(chan struct{}),
peerRoutines: make(map[types.NodeID]*tmsync.Closer),
peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer),
}

r.BaseService = *service.NewBaseService(logger, "Mempool", r)
@@ -305,7 +306,7 @@ func (r *Reactor) processPeerUpdates() {
}
}

func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastTxRoutine(peerID p2ptypes.NodeID, closer *tmsync.Closer) {
peerMempoolID := r.ids.GetForPeer(peerID)
var next *clist.CElement

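PeerManager stays a one-method interface, so test doubles remain trivial after the key-type change. A sketch:

	// Sketch: a static PeerManager backed by a fixed height table.
	type staticPeerManager struct {
		heights map[p2ptypes.NodeID]int64
	}

	func (m *staticPeerManager) GetHeight(id p2ptypes.NodeID) int64 {
		return m.heights[id] // zero value for unknown peers
	}
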
@@ -9,31 +9,32 @@ import (
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/abci/example/kvstore"
    abci "github.com/tendermint/tendermint/abci/types"
    cfg "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/internal/mempool"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/p2ptest"
    "github.com/tendermint/tendermint/libs/log"
    tmrand "github.com/tendermint/tendermint/libs/rand"
    "github.com/tendermint/tendermint/pkg/abci"
    types "github.com/tendermint/tendermint/pkg/mempool"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
    protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
)

type reactorTestSuite struct {
    network *p2ptest.Network
    logger  log.Logger

    reactors       map[types.NodeID]*Reactor
    mempoolChnnels map[types.NodeID]*p2p.Channel
    mempools       map[types.NodeID]*CListMempool
    kvstores       map[types.NodeID]*kvstore.Application
    reactors       map[p2ptypes.NodeID]*Reactor
    mempoolChnnels map[p2ptypes.NodeID]*p2p.Channel
    mempools       map[p2ptypes.NodeID]*CListMempool
    kvstores       map[p2ptypes.NodeID]*kvstore.Application

    peerChans   map[types.NodeID]chan p2p.PeerUpdate
    peerUpdates map[types.NodeID]*p2p.PeerUpdates
    peerChans   map[p2ptypes.NodeID]chan p2p.PeerUpdate
    peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates

    nodes []types.NodeID
    nodes []p2ptypes.NodeID
}

func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reactorTestSuite {
@@ -42,12 +43,12 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac
    rts := &reactorTestSuite{
        logger:  log.TestingLogger().With("testCase", t.Name()),
        network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
        reactors:       make(map[types.NodeID]*Reactor, numNodes),
        mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes),
        mempools:       make(map[types.NodeID]*CListMempool, numNodes),
        kvstores:       make(map[types.NodeID]*kvstore.Application, numNodes),
        peerChans:      make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
        peerUpdates:    make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
        reactors:       make(map[p2ptypes.NodeID]*Reactor, numNodes),
        mempoolChnnels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes),
        mempools:       make(map[p2ptypes.NodeID]*CListMempool, numNodes),
        kvstores:       make(map[p2ptypes.NodeID]*kvstore.Application, numNodes),
        peerChans:      make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes),
        peerUpdates:    make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes),
    }

    chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)}
@@ -118,7 +119,7 @@ func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) {
    }
}

func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) {
func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...p2ptypes.NodeID) {
    t.Helper()

    fn := func(pool *CListMempool) {
@@ -149,7 +150,7 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...typ
    }

        wg.Add(1)
        func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id)
        func(nid p2ptypes.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id)
    }

    wg.Wait()
@@ -313,7 +314,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) {

    nodeID := rts.nodes[0]

    peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
    peerID, err := p2ptypes.NewNodeID("0011223344556677889900112233445566778899")
    require.NoError(t, err)

    // ensure the reactor does not panic (i.e. exhaust active IDs)
@@ -357,7 +358,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) {
    // 0 is already reserved for UnknownPeerID
    ids := mempool.NewMempoolIDs()

    peerID, err := types.NewNodeID("0011223344556677889900112233445566778899")
    peerID, err := p2ptypes.NewNodeID("0011223344556677889900112233445566778899")
    require.NoError(t, err)

    for i := 0; i < mempool.MaxActiveIDs-1; i++ {

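Aside: both tests above build a peer ID with p2ptypes.NewNodeID. Assuming it keeps the contract of the legacy types.NewNodeID (a lowercased hex string encoding exactly 20 bytes), a stand-alone approximation of that validation looks like this; newNodeID is a hypothetical helper, not the tendermint function.

package main

import (
    "encoding/hex"
    "fmt"
    "strings"
)

// newNodeID lowercases the input and checks it is a 20-byte hex string,
// mirroring (approximately) what p2ptypes.NewNodeID is expected to enforce.
func newNodeID(s string) (string, error) {
    s = strings.ToLower(s)
    b, err := hex.DecodeString(s)
    if err != nil {
        return "", fmt.Errorf("node ID %q is not valid hex: %w", s, err)
    }
    if len(b) != 20 {
        return "", fmt.Errorf("node ID %q is %d bytes, expected 20", s, len(b))
    }
    return s, nil
}

func main() {
    id, err := newNodeID("0011223344556677889900112233445566778899")
    fmt.Println(id, err) // valid: 40 hex characters = 20 bytes
    _, err = newNodeID("foo")
    fmt.Println(err) // invalid: not decodable hex
}
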
@@ -7,16 +7,17 @@ import (
    "sync/atomic"
    "time"

    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/internal/libs/clist"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/internal/mempool"
    "github.com/tendermint/tendermint/libs/log"
    tmmath "github.com/tendermint/tendermint/libs/math"
    "github.com/tendermint/tendermint/pkg/abci"
    "github.com/tendermint/tendermint/pkg/block"
    pubmempool "github.com/tendermint/tendermint/pkg/mempool"
    types "github.com/tendermint/tendermint/pkg/mempool"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
)

var _ mempool.Mempool = (*TxMempool)(nil)
@@ -356,7 +357,7 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs {
        wtx := txmp.priorityIndex.PopTx()
        txs = append(txs, wtx.tx)
        wTxs = append(wTxs, wtx)
        size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx})
        size := block.ComputeProtoSizeForTxs([]types.Tx{wtx.tx})

        // Ensure we have capacity for the transaction with respect to the
        // transaction size.

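Aside: the ReapMaxBytesMaxGas hunk above pops transactions from a priority index while tracking byte and gas budgets (the size now computed via block.ComputeProtoSizeForTxs). Below is a self-contained sketch of that budget loop, with container/heap standing in for the priority index and len(tx) standing in for the proto size; it is illustrative, not the tendermint implementation.

package main

import (
    "container/heap"
    "fmt"
)

type wrappedTx struct {
    tx        []byte
    priority  int64
    gasWanted int64
}

// txHeap is a max-heap of transactions ordered by priority, standing in for
// the mempool's priorityIndex.
type txHeap []*wrappedTx

func (h txHeap) Len() int            { return len(h) }
func (h txHeap) Less(i, j int) bool  { return h[i].priority > h[j].priority }
func (h txHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *txHeap) Push(x interface{}) { *h = append(*h, x.(*wrappedTx)) }
func (h *txHeap) Pop() interface{} {
    old := *h
    x := old[len(old)-1]
    *h = old[:len(old)-1]
    return x
}

// reapMaxBytesMaxGas pops the highest-priority tx and stops once adding it
// would exceed either budget. A negative cap means "unlimited", matching the
// mempool convention.
func reapMaxBytesMaxGas(h *txHeap, maxBytes, maxGas int64) [][]byte {
    var txs [][]byte
    var totalBytes, totalGas int64
    for h.Len() > 0 {
        wtx := heap.Pop(h).(*wrappedTx)
        size := int64(len(wtx.tx)) // stand-in for block.ComputeProtoSizeForTxs
        if maxBytes > -1 && totalBytes+size > maxBytes {
            break
        }
        if maxGas > -1 && totalGas+wtx.gasWanted > maxGas {
            break
        }
        totalBytes += size
        totalGas += wtx.gasWanted
        txs = append(txs, wtx.tx)
    }
    return txs
}

func main() {
    h := &txHeap{
        {tx: []byte("tx-low"), priority: 1, gasWanted: 10},
        {tx: []byte("tx-high"), priority: 9, gasWanted: 10},
    }
    heap.Init(h)
    fmt.Printf("%q\n", reapMaxBytesMaxGas(h, 100, 15)) // only tx-high fits the gas cap
}
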
@@ -17,12 +17,12 @@ import (
    "github.com/stretchr/testify/require"
    "github.com/tendermint/tendermint/abci/example/code"
    "github.com/tendermint/tendermint/abci/example/kvstore"
    abci "github.com/tendermint/tendermint/abci/types"
    "github.com/tendermint/tendermint/config"
    "github.com/tendermint/tendermint/internal/mempool"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/pkg/abci"
    types "github.com/tendermint/tendermint/pkg/mempool"
    "github.com/tendermint/tendermint/proxy"
    "github.com/tendermint/tendermint/types"
)

// application extends the KV store application by overriding CheckTx to provide

@@ -15,8 +15,9 @@ import (
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/libs/service"
    types "github.com/tendermint/tendermint/pkg/mempool"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
    protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
    "github.com/tendermint/tendermint/types"
)

var (
@@ -28,7 +29,7 @@ var (
// peer information. This should eventually be replaced with a message-oriented
// approach utilizing the p2p stack.
type PeerManager interface {
    GetHeight(types.NodeID) int64
    GetHeight(p2ptypes.NodeID) int64
}

// Reactor implements a service that contains mempool of txs that are broadcasted
@@ -59,7 +60,7 @@ type Reactor struct {
    observePanic func(interface{})

    mtx          tmsync.Mutex
    peerRoutines map[types.NodeID]*tmsync.Closer
    peerRoutines map[p2ptypes.NodeID]*tmsync.Closer
}

// NewReactor returns a reference to a new reactor.
@@ -80,7 +81,7 @@ func NewReactor(
        mempoolCh:    mempoolCh,
        peerUpdates:  peerUpdates,
        closeCh:      make(chan struct{}),
        peerRoutines: make(map[types.NodeID]*tmsync.Closer),
        peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer),
        observePanic: defaultObservePanic,
    }

@@ -313,7 +314,7 @@ func (r *Reactor) processPeerUpdates() {
    }
}

func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) {
func (r *Reactor) broadcastTxRoutine(peerID p2ptypes.NodeID, closer *tmsync.Closer) {
    peerMempoolID := r.ids.GetForPeer(peerID)
    var nextGossipTx *clist.CElement

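Aside: the v1 reactor adds an observePanic hook next to the same NodeID migration. A minimal sketch of how such a hook can capture panics raised inside peer goroutines — the field and defaultObservePanic names match the diff, the rest is illustrative:

package main

import "fmt"

type reactor struct {
    observePanic func(interface{})
}

func defaultObservePanic(r interface{}) {
    fmt.Println("observed panic:", r)
}

// runPeerRoutine recovers a panic from the wrapped work and reports it to the
// hook, so a misbehaving peer routine does not take the whole node down.
func (r *reactor) runPeerRoutine(work func()) {
    defer func() {
        if e := recover(); e != nil {
            r.observePanic(e)
        }
    }()
    work()
}

func main() {
    r := &reactor{observePanic: defaultObservePanic}
    r.runPeerRoutine(func() { panic("boom") })
    fmt.Println("reactor still running")
}
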
@@ -14,23 +14,23 @@ import (
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/p2ptest"
    "github.com/tendermint/tendermint/libs/log"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
    protomem "github.com/tendermint/tendermint/proto/tendermint/mempool"
    "github.com/tendermint/tendermint/types"
)

type reactorTestSuite struct {
    network *p2ptest.Network
    logger  log.Logger

    reactors        map[types.NodeID]*Reactor
    mempoolChannels map[types.NodeID]*p2p.Channel
    mempools        map[types.NodeID]*TxMempool
    kvstores        map[types.NodeID]*kvstore.Application
    reactors        map[p2ptypes.NodeID]*Reactor
    mempoolChannels map[p2ptypes.NodeID]*p2p.Channel
    mempools        map[p2ptypes.NodeID]*TxMempool
    kvstores        map[p2ptypes.NodeID]*kvstore.Application

    peerChans   map[types.NodeID]chan p2p.PeerUpdate
    peerUpdates map[types.NodeID]*p2p.PeerUpdates
    peerChans   map[p2ptypes.NodeID]chan p2p.PeerUpdate
    peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates

    nodes []types.NodeID
    nodes []p2ptypes.NodeID
}

func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
@@ -44,12 +44,12 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite {
    rts := &reactorTestSuite{
        logger:  log.TestingLogger().With("testCase", t.Name()),
        network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}),
        reactors:        make(map[types.NodeID]*Reactor, numNodes),
        mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes),
        mempools:        make(map[types.NodeID]*TxMempool, numNodes),
        kvstores:        make(map[types.NodeID]*kvstore.Application, numNodes),
        peerChans:       make(map[types.NodeID]chan p2p.PeerUpdate, numNodes),
        peerUpdates:     make(map[types.NodeID]*p2p.PeerUpdates, numNodes),
        reactors:        make(map[p2ptypes.NodeID]*Reactor, numNodes),
        mempoolChannels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes),
        mempools:        make(map[p2ptypes.NodeID]*TxMempool, numNodes),
        kvstores:        make(map[p2ptypes.NodeID]*kvstore.Application, numNodes),
        peerChans:       make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes),
        peerUpdates:     make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes),
    }

    chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)}

@@ -7,7 +7,7 @@ import (
    "github.com/tendermint/tendermint/internal/libs/clist"
    tmsync "github.com/tendermint/tendermint/internal/libs/sync"
    "github.com/tendermint/tendermint/internal/mempool"
    "github.com/tendermint/tendermint/types"
    types "github.com/tendermint/tendermint/pkg/mempool"
)

// WrappedTx defines a wrapper around a raw transaction with additional metadata

@@ -10,7 +10,7 @@ import (
    "strconv"
    "strings"

    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/pkg/p2p"
)

var (
@@ -31,7 +31,7 @@ var (
// If the URL is opaque, i.e. of the form "scheme:opaque", then the opaque part
// is expected to contain a node ID.
type NodeAddress struct {
    NodeID   types.NodeID
    NodeID   p2p.NodeID
    Protocol Protocol
    Hostname string
    Port     uint16
@@ -58,13 +58,13 @@ func ParseNodeAddress(urlString string) (NodeAddress, error) {

    // Opaque URLs are expected to contain only a node ID.
    if url.Opaque != "" {
        address.NodeID = types.NodeID(url.Opaque)
        address.NodeID = p2p.NodeID(url.Opaque)
        return address, address.Validate()
    }

    // Otherwise, just parse a normal networked URL.
    if url.User != nil {
        address.NodeID = types.NodeID(strings.ToLower(url.User.Username()))
        address.NodeID = p2p.NodeID(strings.ToLower(url.User.Username()))
    }

    address.Hostname = strings.ToLower(url.Hostname())

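Aside: ParseNodeAddress above handles two URL shapes — opaque ("scheme:nodeid") and networked ("scheme://nodeid@host:port"). A stripped-down, runnable approximation using net/url; NodeID validation and the defaulting done by the real function are omitted:

package main

import (
    "fmt"
    "net/url"
    "strings"
)

type nodeAddress struct {
    NodeID   string
    Protocol string
    Hostname string
    Port     string
}

func parseNodeAddress(urlString string) (nodeAddress, error) {
    u, err := url.Parse(urlString)
    if err != nil {
        return nodeAddress{}, err
    }
    addr := nodeAddress{Protocol: u.Scheme}

    // Opaque URLs ("memory:deadbeef...") carry only a node ID.
    if u.Opaque != "" {
        addr.NodeID = u.Opaque
        return addr, nil
    }

    // Networked URLs put the node ID in the user-info part.
    if u.User != nil {
        addr.NodeID = strings.ToLower(u.User.Username())
    }
    addr.Hostname = strings.ToLower(u.Hostname())
    addr.Port = u.Port()
    return addr, nil
}

func main() {
    a, _ := parseNodeAddress("mconn://00112233445566778899aabbccddeeff00112233@host:26657")
    fmt.Printf("%+v\n", a)
    b, _ := parseNodeAddress("memory:00112233445566778899aabbccddeeff00112233")
    fmt.Printf("%+v\n", b)
}
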
@@ -9,14 +9,14 @@ import (

    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

func TestNewNodeID(t *testing.T) {
    // Most tests are in TestNodeID_Validate, this just checks that it's validated.
    testcases := []struct {
        input  string
        expect types.NodeID
        expect p2ptypes.NodeID
        ok     bool
    }{
        {"", "", false},
@@ -29,7 +29,7 @@ func TestNewNodeID(t *testing.T) {
    for _, tc := range testcases {
        tc := tc
        t.Run(tc.input, func(t *testing.T) {
            id, err := types.NewNodeID(tc.input)
            id, err := p2ptypes.NewNodeID(tc.input)
            if !tc.ok {
                require.Error(t, err)
            } else {
@@ -42,14 +42,14 @@ func TestNewNodeID(t *testing.T) {

func TestNewNodeIDFromPubKey(t *testing.T) {
    privKey := ed25519.GenPrivKeyFromSecret([]byte("foo"))
    nodeID := types.NodeIDFromPubKey(privKey.PubKey())
    require.Equal(t, types.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
    nodeID := p2ptypes.NodeIDFromPubKey(privKey.PubKey())
    require.Equal(t, p2ptypes.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
    require.NoError(t, nodeID.Validate())
}

func TestNodeID_Bytes(t *testing.T) {
    testcases := []struct {
        nodeID types.NodeID
        nodeID p2ptypes.NodeID
        expect []byte
        ok     bool
    }{
@@ -75,7 +75,7 @@ func TestNodeID_Bytes(t *testing.T) {

func TestNodeID_Validate(t *testing.T) {
    testcases := []struct {
        nodeID types.NodeID
        nodeID p2ptypes.NodeID
        ok     bool
    }{
        {"", false},
@@ -100,7 +100,7 @@ func TestNodeID_Validate(t *testing.T) {

func TestParseNodeAddress(t *testing.T) {
    user := "00112233445566778899aabbccddeeff00112233"
    id := types.NodeID(user)
    id := p2ptypes.NodeID(user)

    testcases := []struct {
        url string
@@ -202,7 +202,7 @@ func TestParseNodeAddress(t *testing.T) {
}

func TestNodeAddress_Resolve(t *testing.T) {
    id := types.NodeID("00112233445566778899aabbccddeeff00112233")
    id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")

    testcases := []struct {
        address p2p.NodeAddress
@@ -286,7 +286,7 @@ func TestNodeAddress_Resolve(t *testing.T) {
}

func TestNodeAddress_String(t *testing.T) {
    id := types.NodeID("00112233445566778899aabbccddeeff00112233")
    id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")
    user := string(id)
    testcases := []struct {
        address p2p.NodeAddress
@@ -349,7 +349,7 @@ func TestNodeAddress_String(t *testing.T) {
}

func TestNodeAddress_Validate(t *testing.T) {
    id := types.NodeID("00112233445566778899aabbccddeeff00112233")
    id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")
    testcases := []struct {
        address p2p.NodeAddress
        ok      bool

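Aside: every table-driven test above re-declares the loop variable (tc := tc) before calling t.Run. The small program below shows why that copy matters whenever iterations hand the variable to a goroutine (parallel subtests do exactly that); Go 1.22 changed loop-variable scoping, but this reflects the earlier semantics the codebase targeted.

package main

import (
    "fmt"
    "sync"
)

func main() {
    inputs := []string{"a", "b", "c"}

    var wg sync.WaitGroup
    for _, in := range inputs {
        in := in // per-iteration copy; without it, pre-1.22 goroutines may all see "c"
        wg.Add(1)
        go func() {
            defer wg.Done()
            fmt.Println("processing", in)
        }()
    }
    wg.Wait()
}
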
@@ -4,7 +4,7 @@ import (
    "fmt"
    "net"

    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/pkg/p2p"
)

// ErrFilterTimeout indicates that a filter operation timed out.
@@ -20,7 +20,7 @@ type ErrRejected struct {
    addr NetAddress
    conn net.Conn
    err  error
    id   types.NodeID
    id   p2p.NodeID
    isAuthFailure bool
    isDuplicate   bool
    isFiltered    bool
@@ -101,7 +101,7 @@ func (e ErrRejected) IsSelf() bool { return e.isSelf }
// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
// ID.
type ErrSwitchDuplicatePeerID struct {
    ID types.NodeID
    ID p2p.NodeID
}

func (e ErrSwitchDuplicatePeerID) Error() string {
@@ -129,7 +129,7 @@ func (e ErrSwitchConnectToSelf) Error() string {

type ErrSwitchAuthenticationFailure struct {
    Dialed *NetAddress
    Got    types.NodeID
    Got    p2p.NodeID
}

func (e ErrSwitchAuthenticationFailure) Error() string {

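Aside: ErrRejected above gathers all rejection context into one struct and classifies it through boolean predicate methods. A compact sketch of that pattern (errRejected is an invented analogue, not the tendermint type):

package main

import (
    "errors"
    "fmt"
)

type errRejected struct {
    id            string
    err           error
    isAuthFailure bool
    isDuplicate   bool
}

func (e errRejected) Error() string {
    switch {
    case e.isAuthFailure:
        return fmt.Sprintf("peer %s rejected: auth failure: %v", e.id, e.err)
    case e.isDuplicate:
        return fmt.Sprintf("peer %s rejected: duplicate connection", e.id)
    default:
        return fmt.Sprintf("peer %s rejected: %v", e.id, e.err)
    }
}

// Predicate methods let callers branch on the rejection reason without
// exposing the underlying fields.
func (e errRejected) IsAuthFailure() bool { return e.isAuthFailure }
func (e errRejected) IsDuplicate() bool   { return e.isDuplicate }

func main() {
    var err error = errRejected{id: "0011", err: errors.New("bad signature"), isAuthFailure: true}

    var rej errRejected
    if errors.As(err, &rej) && rej.IsAuthFailure() {
        fmt.Println("will not retry:", err)
    }
}
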
@@ -6,13 +6,13 @@ import (
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/internal/p2p/conn"
    "github.com/tendermint/tendermint/libs/service"
    "github.com/tendermint/tendermint/types"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

type Peer struct {
    *service.BaseService
    ip net.IP
    id types.NodeID
    id p2ptypes.NodeID
    addr *p2p.NetAddress
    kv map[string]interface{}
    Outbound, Persistent bool
@@ -25,9 +25,9 @@ func NewPeer(ip net.IP) *Peer {
    if ip == nil {
        _, netAddr = p2p.CreateRoutableAddr()
    } else {
        netAddr = types.NewNetAddressIPPort(ip, 26656)
        netAddr = p2ptypes.NewNetAddressIPPort(ip, 26656)
    }
    nodeKey := types.GenNodeKey()
    nodeKey := p2ptypes.GenNodeKey()
    netAddr.ID = nodeKey.ID
    mp := &Peer{
        ip: ip,
@@ -45,14 +45,14 @@ func NewPeer(ip net.IP) *Peer {
func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error
func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true }
func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true }
func (mp *Peer) NodeInfo() types.NodeInfo {
    return types.NodeInfo{
func (mp *Peer) NodeInfo() p2ptypes.NodeInfo {
    return p2ptypes.NodeInfo{
        NodeID:     mp.addr.ID,
        ListenAddr: mp.addr.DialString(),
    }
}
func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} }
func (mp *Peer) ID() types.NodeID { return mp.id }
func (mp *Peer) ID() p2ptypes.NodeID { return mp.id }
func (mp *Peer) IsOutbound() bool { return mp.Outbound }
func (mp *Peer) IsPersistent() bool { return mp.Persistent }
func (mp *Peer) Get(key string) interface{} {

@@ -13,7 +13,7 @@ import (

    p2p "github.com/tendermint/tendermint/internal/p2p"

    types "github.com/tendermint/tendermint/types"
    pkgp2p "github.com/tendermint/tendermint/pkg/p2p"
)

// Connection is an autogenerated mock type for the Connection type
@@ -50,18 +50,18 @@ func (_m *Connection) FlushClose() error {
}

// Handshake provides a mock function with given fields: _a0, _a1, _a2
func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) {
func (_m *Connection) Handshake(_a0 context.Context, _a1 pkgp2p.NodeInfo, _a2 crypto.PrivKey) (pkgp2p.NodeInfo, crypto.PubKey, error) {
    ret := _m.Called(_a0, _a1, _a2)

    var r0 types.NodeInfo
    if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok {
    var r0 pkgp2p.NodeInfo
    if rf, ok := ret.Get(0).(func(context.Context, pkgp2p.NodeInfo, crypto.PrivKey) pkgp2p.NodeInfo); ok {
        r0 = rf(_a0, _a1, _a2)
    } else {
        r0 = ret.Get(0).(types.NodeInfo)
        r0 = ret.Get(0).(pkgp2p.NodeInfo)
    }

    var r1 crypto.PubKey
    if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
    if rf, ok := ret.Get(1).(func(context.Context, pkgp2p.NodeInfo, crypto.PrivKey) crypto.PubKey); ok {
        r1 = rf(_a0, _a1, _a2)
    } else {
        if ret.Get(1) != nil {
@@ -70,7 +70,7 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry
    }

    var r2 error
    if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok {
    if rf, ok := ret.Get(2).(func(context.Context, pkgp2p.NodeInfo, crypto.PrivKey) error); ok {
        r2 = rf(_a0, _a1, _a2)
    } else {
        r2 = ret.Error(2)

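Aside: the Connection and Peer files above are mockery-generated; each method forwards to _m.Called(...) and pulls typed return values back out of ret.Get(i). A small hand-written analogue showing how such a mock is driven (Greeter is invented for illustration):

package main

import (
    "fmt"

    "github.com/stretchr/testify/mock"
)

type Greeter struct {
    mock.Mock
}

// Greet follows the generated pattern: record the call, then convert the
// stubbed return values back to their concrete types.
func (m *Greeter) Greet(name string) (string, error) {
    ret := m.Called(name)
    return ret.String(0), ret.Error(1)
}

func main() {
    g := &Greeter{}
    g.On("Greet", "node").Return("hello node", nil) // what ret.Get(i) hands back
    out, err := g.Greet("node")
    fmt.Println(out, err)
    // in a real test: g.AssertExpectations(t)
}
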
@@ -10,7 +10,7 @@ import (

    net "net"

    types "github.com/tendermint/tendermint/types"
    pkgp2p "github.com/tendermint/tendermint/pkg/p2p"
)

// Peer is an autogenerated mock type for the Peer type
@@ -54,14 +54,14 @@ func (_m *Peer) Get(_a0 string) interface{} {
}

// ID provides a mock function with given fields:
func (_m *Peer) ID() types.NodeID {
func (_m *Peer) ID() pkgp2p.NodeID {
    ret := _m.Called()

    var r0 types.NodeID
    if rf, ok := ret.Get(0).(func() types.NodeID); ok {
    var r0 pkgp2p.NodeID
    if rf, ok := ret.Get(0).(func() pkgp2p.NodeID); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(types.NodeID)
        r0 = ret.Get(0).(pkgp2p.NodeID)
    }

    return r0
@@ -110,14 +110,14 @@ func (_m *Peer) IsRunning() bool {
}

// NodeInfo provides a mock function with given fields:
func (_m *Peer) NodeInfo() types.NodeInfo {
func (_m *Peer) NodeInfo() pkgp2p.NodeInfo {
    ret := _m.Called()

    var r0 types.NodeInfo
    if rf, ok := ret.Get(0).(func() types.NodeInfo); ok {
    var r0 pkgp2p.NodeInfo
    if rf, ok := ret.Get(0).(func() pkgp2p.NodeInfo); ok {
        r0 = rf()
    } else {
        r0 = ret.Get(0).(types.NodeInfo)
        r0 = ret.Get(0).(pkgp2p.NodeInfo)
    }

    return r0
@@ -243,15 +243,15 @@ func (_m *Peer) SetLogger(_a0 log.Logger) {
}

// SocketAddr provides a mock function with given fields:
func (_m *Peer) SocketAddr() *types.NetAddress {
func (_m *Peer) SocketAddr() *pkgp2p.NetAddress {
    ret := _m.Called()

    var r0 *types.NetAddress
    if rf, ok := ret.Get(0).(func() *types.NetAddress); ok {
    var r0 *pkgp2p.NetAddress
    if rf, ok := ret.Get(0).(func() *pkgp2p.NetAddress); ok {
        r0 = rf()
    } else {
        if ret.Get(0) != nil {
            r0 = ret.Get(0).(*types.NetAddress)
            r0 = ret.Get(0).(*pkgp2p.NetAddress)
        }
    }

@@ -5,7 +5,7 @@
package p2p

import (
    "github.com/tendermint/tendermint/types"
    "github.com/tendermint/tendermint/pkg/p2p"
)

type NetAddress = types.NetAddress
type NetAddress = p2p.NetAddress

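Aside: this hunk only re-points a type alias — the shim that keeps the old import path compiling while NetAddress moves to pkg/p2p. A one-file demonstration of alias semantics (OldAddr is hypothetical; net.TCPAddr plays the moved type):

package main

import (
    "fmt"
    "net"
)

// A type alias ("=") makes OldAddr and net.TCPAddr the same type, so code
// written against the old name keeps working with no conversions.
type OldAddr = net.TCPAddr

func main() {
    a := &OldAddr{IP: net.IPv4(127, 0, 0, 1), Port: 26656}
    var b *net.TCPAddr = a // identical types: assignment needs no cast
    fmt.Println(b.String())
}
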
@@ -6,7 +6,7 @@ import (
    "github.com/tendermint/tendermint/crypto"
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

// Common setup for P2P tests.
@@ -23,8 +23,8 @@ var (
    }

    selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd})
    selfID   = types.NodeIDFromPubKey(selfKey.PubKey())
    selfInfo = types.NodeInfo{
    selfID   = p2ptypes.NodeIDFromPubKey(selfKey.PubKey())
    selfInfo = p2ptypes.NodeInfo{
        NodeID:     selfID,
        ListenAddr: "0.0.0.0:0",
        Network:    "test",
@@ -33,8 +33,8 @@ var (
    }

    peerKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0x84, 0xd7, 0x01, 0xbf, 0x83, 0x20, 0x1c, 0xfe})
    peerID   = types.NodeIDFromPubKey(peerKey.PubKey())
    peerInfo = types.NodeInfo{
    peerID   = p2ptypes.NodeIDFromPubKey(peerKey.PubKey())
    peerInfo = p2ptypes.NodeInfo{
        NodeID:     peerID,
        ListenAddr: "0.0.0.0:0",
        Network:    "test",

@@ -14,14 +14,14 @@ import (
    "github.com/tendermint/tendermint/crypto/ed25519"
    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/libs/log"
    "github.com/tendermint/tendermint/types"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

// Network sets up an in-memory network that can be used for high-level P2P
// testing. It creates an arbitrary number of nodes that are connected to each
// other, and can open channels across all nodes with custom reactors.
type Network struct {
    Nodes map[types.NodeID]*Node
    Nodes map[p2ptypes.NodeID]*Node

    logger        log.Logger
    memoryNetwork *p2p.MemoryNetwork
@@ -52,7 +52,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network {
    opts.setDefaults()
    logger := log.TestingLogger()
    network := &Network{
        Nodes:         map[types.NodeID]*Node{},
        Nodes:         map[p2ptypes.NodeID]*Node{},
        logger:        logger,
        memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize),
    }
@@ -72,7 +72,7 @@ func (n *Network) Start(t *testing.T) {
    // Set up a list of node addresses to dial, and a peer update subscription
    // for each node.
    dialQueue := []p2p.NodeAddress{}
    subs := map[types.NodeID]*p2p.PeerUpdates{}
    subs := map[p2ptypes.NodeID]*p2p.PeerUpdates{}
    for _, node := range n.Nodes {
        dialQueue = append(dialQueue, node.NodeAddress)
        subs[node.NodeID] = node.PeerManager.Subscribe()
@@ -125,8 +125,8 @@ func (n *Network) Start(t *testing.T) {
}

// NodeIDs returns the network's node IDs.
func (n *Network) NodeIDs() []types.NodeID {
    ids := []types.NodeID{}
func (n *Network) NodeIDs() []p2ptypes.NodeID {
    ids := []p2ptypes.NodeID{}
    for id := range n.Nodes {
        ids = append(ids, id)
    }
@@ -140,8 +140,8 @@ func (n *Network) MakeChannels(
    chDesc p2p.ChannelDescriptor,
    messageType proto.Message,
    size int,
) map[types.NodeID]*p2p.Channel {
    channels := map[types.NodeID]*p2p.Channel{}
) map[p2ptypes.NodeID]*p2p.Channel {
    channels := map[p2ptypes.NodeID]*p2p.Channel{}
    for _, node := range n.Nodes {
        channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size)
    }
@@ -156,8 +156,8 @@ func (n *Network) MakeChannelsNoCleanup(
    chDesc p2p.ChannelDescriptor,
    messageType proto.Message,
    size int,
) map[types.NodeID]*p2p.Channel {
    channels := map[types.NodeID]*p2p.Channel{}
) map[p2ptypes.NodeID]*p2p.Channel {
    channels := map[p2ptypes.NodeID]*p2p.Channel{}
    for _, node := range n.Nodes {
        channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size)
    }
@@ -174,7 +174,7 @@ func (n *Network) RandomNode() *Node {
}

// Peers returns a node's peers (i.e. everyone except itself).
func (n *Network) Peers(id types.NodeID) []*Node {
func (n *Network) Peers(id p2ptypes.NodeID) []*Node {
    peers := make([]*Node, 0, len(n.Nodes)-1)
    for _, peer := range n.Nodes {
        if peer.NodeID != id {
@@ -186,7 +186,7 @@ func (n *Network) Peers(id types.NodeID) []*Node {

// Remove removes a node from the network, stopping it and waiting for all other
// nodes to pick up the disconnection.
func (n *Network) Remove(t *testing.T, id types.NodeID) {
func (n *Network) Remove(t *testing.T, id p2ptypes.NodeID) {
    require.Contains(t, n.Nodes, id)
    node := n.Nodes[id]
    delete(n.Nodes, id)
@@ -214,8 +214,8 @@ func (n *Network) Remove(t *testing.T, id types.NodeID) {

// Node is a node in a Network, with a Router and a PeerManager.
type Node struct {
    NodeID      types.NodeID
    NodeInfo    types.NodeInfo
    NodeID      p2ptypes.NodeID
    NodeInfo    p2ptypes.NodeInfo
    NodeAddress p2p.NodeAddress
    PrivKey     crypto.PrivKey
    Router      *p2p.Router
@@ -228,8 +28,8 @@ type Node struct {
// network. Callers are responsible for updating peering relationships.
func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node {
    privKey := ed25519.GenPrivKey()
    nodeID := types.NodeIDFromPubKey(privKey.PubKey())
    nodeInfo := types.NodeInfo{
    nodeID := p2ptypes.NodeIDFromPubKey(privKey.PubKey())
    nodeInfo := p2ptypes.NodeInfo{
        NodeID:     nodeID,
        ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now.
        Moniker:    string(nodeID),

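Aside: Network above keys every node by its NodeID and derives helper views from that one map. A condensed, runnable version of the NodeIDs and Peers helpers with stand-in types (NodeID and Node are simplified; the real ones live in internal/p2p/p2ptest):

package main

import "fmt"

type NodeID string

type Node struct{ ID NodeID }

type Network struct {
    Nodes map[NodeID]*Node
}

// NodeIDs returns the network's node IDs (in map-iteration order).
func (n *Network) NodeIDs() []NodeID {
    ids := []NodeID{}
    for id := range n.Nodes {
        ids = append(ids, id)
    }
    return ids
}

// Peers returns a node's peers, i.e. every node except itself.
func (n *Network) Peers(id NodeID) []*Node {
    peers := make([]*Node, 0, len(n.Nodes)-1)
    for _, peer := range n.Nodes {
        if peer.ID != id {
            peers = append(peers, peer)
        }
    }
    return peers
}

func main() {
    net := &Network{Nodes: map[NodeID]*Node{
        "a": {ID: "a"}, "b": {ID: "b"}, "c": {ID: "c"},
    }}
    fmt.Println(len(net.NodeIDs()), "nodes;", len(net.Peers("a")), "peers of node a")
}
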
@@ -8,7 +8,7 @@ import (
    "github.com/stretchr/testify/require"

    "github.com/tendermint/tendermint/internal/p2p"
    "github.com/tendermint/tendermint/types"
    p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
)

// RequireEmpty requires that the given channel is empty.
@@ -84,7 +84,7 @@ func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) {
func RequireSendReceive(
    t *testing.T,
    channel *p2p.Channel,
    peerID types.NodeID,
    peerID p2ptypes.NodeID,
    send proto.Message,
    receive proto.Message,
) {

Some files were not shown because too many files have changed in this diff.