From 68c54f067671c44ecbf93dfa68237b79549d2ccf Mon Sep 17 00:00:00 2001
From: Callum Waters
Date: Tue, 24 Aug 2021 13:26:01 +0200
Subject: [PATCH] make the rest of the code compile

---
 abci/client/client.go | 66 ++---
 abci/client/grpc_client.go | 140 ++++-----
 abci/client/local_client.go | 2 +-
 abci/client/mocks/client.go | 199 +++++------
 abci/client/socket_client.go | 208 ++++++-------
 abci/client/socket_client_test.go | 14 +-
 abci/cmd/abci-cli/abci-cli.go | 14 +-
 abci/example/example_test.go | 22 +-
 abci/example/kvstore/helpers.go | 12 +-
 abci/example/kvstore/kvstore.go | 28 +-
 abci/example/kvstore/kvstore_test.go | 64 ++--
 abci/example/kvstore/persistent_kvstore.go | 78 ++---
 abci/server/grpc_server.go | 8 +-
 abci/server/server.go | 6 +-
 abci/server/socket_server.go | 82 ++---
 abci/tests/benchmarks/parallel/parallel.go | 10 +-
 abci/tests/benchmarks/simple/simple.go | 20 +-
 abci/tests/server/client.go | 12 +-
 cmd/tendermint/commands/gen_node_key.go | 4 +-
 cmd/tendermint/commands/gen_validator.go | 4 +-
 cmd/tendermint/commands/init.go | 17 +-
 cmd/tendermint/commands/reindex_event.go | 8 +-
 .../commands/reset_priv_validator.go | 4 +-
 cmd/tendermint/commands/testnet.go | 16 +-
 config/config.go | 12 +-
 docs/tutorials/go-built-in.md | 4 +-
 docs/tutorials/go.md | 2 +-
 internal/blocksync/msgs.go | 4 +-
 internal/blocksync/v0/pool.go | 47 +--
 internal/blocksync/v0/pool_test.go | 20 +-
 internal/blocksync/v0/reactor.go | 14 +-
 internal/blocksync/v0/reactor_test.go | 50 ++--
 .../v2/internal/behavior/peer_behaviour.go | 12 +-
 .../v2/internal/behavior/reporter.go | 8 +-
 .../v2/internal/behavior/reporter_test.go | 16 +-
 internal/blocksync/v2/io.go | 6 +-
 internal/blocksync/v2/processor.go | 22 +-
 internal/blocksync/v2/processor_context.go | 27 +-
 internal/blocksync/v2/processor_test.go | 12 +-
 internal/blocksync/v2/reactor.go | 24 +-
 internal/blocksync/v2/reactor_test.go | 49 +--
 internal/blocksync/v2/scheduler.go | 57 ++--
 internal/blocksync/v2/scheduler_test.go | 216 +++++-------
 internal/consensus/byzantine_test.go | 34 ++-
 internal/consensus/common_test.go | 85 +++---
 internal/consensus/invalid_test.go | 14 +-
 internal/consensus/mempool_test.go | 21 +-
 internal/consensus/metrics.go | 4 +-
 internal/consensus/msgs.go | 53 ++--
 internal/consensus/msgs_test.go | 63 ++--
 internal/consensus/peer_state.go | 33 ++-
 internal/consensus/reactor.go | 52 +++-
 internal/consensus/reactor_test.go | 86 +++---
 internal/consensus/replay.go | 42 +--
 internal/consensus/replay_file.go | 18 +-
 internal/consensus/replay_stubs.go | 12 +-
 internal/consensus/replay_test.go | 127 ++++----
 internal/consensus/state.go | 97 +++---
 internal/consensus/state_test.go | 279 +++++++++---------
 internal/consensus/types/height_vote_set.go | 48 +--
 .../consensus/types/height_vote_set_test.go | 9 +-
 internal/consensus/types/peer_round_state.go | 10 +-
 internal/consensus/types/round_state.go | 67 +++--
 internal/consensus/wal_generator.go | 7 +-
 internal/consensus/wal_test.go | 11 +-
 internal/evidence/mocks/block_store.go | 21 +-
 internal/evidence/pool.go | 9 +-
 internal/evidence/pool_test.go | 68 +++--
 internal/evidence/reactor.go | 9 +-
 internal/evidence/reactor_test.go | 62 ++--
 internal/evidence/services.go | 7 +-
 internal/evidence/verify.go | 22 +-
 internal/evidence/verify_test.go | 114 +++----
 internal/mempool/cache.go | 2 +-
 internal/mempool/ids.go | 12 +-
 internal/mempool/ids_test.go | 4 +-
 internal/mempool/mempool.go | 7 +-
 internal/mempool/mock/mempool.go | 4 +-
 internal/mempool/tx.go | 9 +-
 internal/mempool/v0/cache_test.go | 4 +-
 internal/mempool/v0/clist_mempool.go | 12 +-
 internal/mempool/v0/clist_mempool_test.go | 4 +-
 internal/mempool/v0/reactor.go | 11 +-
 internal/mempool/v0/reactor_test.go | 39 +--
 internal/mempool/v1/mempool.go | 7 +-
 internal/mempool/v1/mempool_test.go | 4 +-
 internal/mempool/v1/reactor.go | 11 +-
 internal/mempool/v1/reactor_test.go | 28 +-
 internal/mempool/v1/tx.go | 2 +-
 internal/p2p/address.go | 8 +-
 internal/p2p/address_test.go | 22 +-
 internal/p2p/errors.go | 8 +-
 internal/p2p/mock/peer.go | 14 +-
 internal/p2p/mocks/connection.go | 14 +-
 internal/p2p/mocks/peer.go | 26 +-
 internal/p2p/netaddress.go | 4 +-
 internal/p2p/p2p_test.go | 10 +-
 internal/p2p/p2ptest/network.go | 32 +-
 internal/p2p/p2ptest/require.go | 4 +-
 internal/p2p/p2ptest/util.go | 4 +-
 internal/p2p/peer.go | 14 +-
 internal/p2p/peer_set.go | 14 +-
 internal/p2p/peer_set_test.go | 10 +-
 internal/p2p/peer_test.go | 14 +-
 internal/p2p/peermanager.go | 98 +++---
 internal/p2p/peermanager_scoring_test.go | 6 +-
 internal/p2p/peermanager_test.go | 260 ++++++++--------
 internal/p2p/pex/addrbook.go | 20 +-
 internal/p2p/pex/addrbook_test.go | 16 +-
 internal/p2p/pex/bench_test.go | 8 +-
 internal/p2p/pex/known_address.go | 4 +-
 internal/p2p/pex/pex_reactor.go | 22 +-
 internal/p2p/pex/pex_reactor_test.go | 8 +-
 internal/p2p/pex/reactor.go | 22 +-
 internal/p2p/pex/reactor_test.go | 34 +--
 internal/p2p/router.go | 42 +--
 internal/p2p/router_init_test.go | 12 +-
 internal/p2p/router_test.go | 34 +--
 internal/p2p/shim_test.go | 6 +-
 internal/p2p/switch.go | 44 +--
 internal/p2p/switch_test.go | 4 +-
 internal/p2p/test_util.go | 20 +-
 internal/p2p/transport.go | 14 +-
 internal/p2p/transport_mconn.go | 30 +-
 internal/p2p/transport_memory.go | 42 +--
 internal/p2p/transport_memory_test.go | 4 +-
 internal/p2p/transport_test.go | 24 +-
 internal/statesync/block_queue.go | 9 +-
 internal/statesync/block_queue_test.go | 16 +-
 internal/statesync/chunks.go | 12 +-
 internal/statesync/chunks_test.go | 32 +-
 internal/statesync/dispatcher.go | 50 ++--
 internal/statesync/dispatcher_test.go | 13 +-
 internal/statesync/mocks/state_provider.go | 12 +-
 internal/statesync/reactor.go | 16 +-
 internal/statesync/reactor_test.go | 57 ++--
 internal/statesync/snapshots.go | 30 +-
 internal/statesync/snapshots_test.go | 38 +--
 internal/statesync/stateprovider.go | 6 +-
 internal/statesync/syncer.go | 17 +-
 internal/statesync/syncer_test.go | 44 +--
 libs/pubsub/example_test.go | 2 +-
 libs/pubsub/pubsub.go | 12 +-
 libs/pubsub/pubsub_test.go | 2 +-
 libs/pubsub/query/empty.go | 6 +-
 libs/pubsub/query/empty_test.go | 2 +-
 libs/pubsub/query/query.go | 6 +-
 libs/pubsub/query/query_test.go | 2 +-
 libs/pubsub/subscription.go | 8 +-
 light/client.go | 13 +-
 light/client_benchmark_test.go | 11 +-
 light/client_test.go | 106 +++----
 light/detector.go | 30 +-
 light/detector_test.go | 62 ++--
 light/errors.go | 7 +-
 light/helpers_test.go | 69 ++---
 light/light_test.go | 4 +-
 light/mbt/driver_test.go | 17 +-
 light/provider/http/http.go | 15 +-
 light/provider/http/http_test.go | 4 +-
 light/provider/mocks/provider.go | 17 +-
 light/provider/provider.go | 7 +-
 light/proxy/routes.go | 19 +-
 light/rpc/client.go | 18 +-
 light/rpc/mocks/light_client.go | 27 +-
 light/store/db/db.go | 12 +-
 light/store/db/db_test.go | 15 +-
 light/store/store.go | 8 +-
 light/verifier.go | 39 +--
 light/verifier_test.go | 17 +-
 node/node.go | 56 ++--
 node/node_test.go | 70 ++---
 node/public.go | 9 +-
 node/setup.go | 64 ++--
 privval/file.go | 16 +-
 privval/file_test.go | 47 +--
 privval/grpc/client.go | 4 +-
 privval/grpc/client_test.go | 27 +-
 privval/grpc/server.go | 6 +-
 privval/grpc/server_test.go | 49 +--
 privval/msgs_test.go | 19 +-
 privval/retry_signer_client.go | 4 +-
 privval/signer_client.go | 4 +-
 privval/signer_client_test.go | 67 ++---
 privval/signer_listener_endpoint_test.go | 6 +-
 privval/signer_requestHandler.go | 4 +-
 privval/signer_server.go | 8 +-
 proto/tendermint/abci/types.proto | 2 +-
 proto/tendermint/blocksync/message_test.go | 5 +-
 proto/tendermint/state/types.pb.go | 2 +-
 proxy/app_conn.go | 68 ++---
 proxy/app_conn_test.go | 8 +-
 proxy/client.go | 8 +-
 proxy/mocks/app_conn_consensus.go | 51 ++--
 proxy/mocks/app_conn_mempool.go | 23 +-
 proxy/mocks/app_conn_query.go | 32 +-
 proxy/mocks/app_conn_snapshot.go | 44 +--
 proxy/version.go | 2 +-
 rpc/client/event_test.go | 4 +-
 rpc/client/evidence_test.go | 22 +-
 rpc/client/helpers.go | 8 +-
 rpc/client/http/http.go | 15 +-
 rpc/client/interface.go | 13 +-
 rpc/client/local/local.go | 24 +-
 rpc/client/mock/abci.go | 22 +-
 rpc/client/mock/abci_test.go | 12 +-
 rpc/client/mock/client.go | 13 +-
 rpc/client/mocks/client.go | 36 +--
 rpc/client/rpc_test.go | 21 +-
 rpc/core/abci.go | 2 +-
 rpc/core/blocks.go | 9 +-
 rpc/core/blocks_test.go | 29 +-
 rpc/core/consensus.go | 4 +-
 rpc/core/env.go | 8 +-
 rpc/core/evidence.go | 4 +-
 rpc/core/mempool.go | 9 +-
 rpc/core/status.go | 4 +-
 rpc/core/tx.go | 8 +-
 rpc/core/types/responses.go | 59 ++--
 rpc/core/types/responses_test.go | 14 +-
 rpc/grpc/api.go | 2 +-
 rpc/grpc/types.pb.go | 2 +-
 rpc/openapi/openapi.yaml | 2 +-
 rpc/test/helpers.go | 2 +-
 scripts/json2wal/main.go | 4 +-
 state/execution.go | 67 +++--
 state/execution_test.go | 113 +++----
 state/export_test.go | 13 +-
 state/helpers_test.go | 98 +++---
 state/indexer/block/kv/kv.go | 14 +-
 state/indexer/block/kv/kv_test.go | 13 +-
 state/indexer/block/kv/util.go | 6 +-
 state/indexer/block/null/null.go | 4 +-
 state/indexer/eventsink.go | 6 +-
 state/indexer/indexer.go | 6 +-
 state/indexer/indexer_service.go | 14 +-
 state/indexer/indexer_service_test.go | 16 +-
 state/indexer/mocks/event_sink.go | 35 +--
 state/indexer/sink/kv/kv.go | 6 +-
 state/indexer/sink/kv/kv_test.go | 32 +-
 state/indexer/sink/null/null.go | 6 +-
 state/indexer/sink/null/null_test.go | 4 +-
 state/indexer/sink/psql/psql.go | 7 +-
 state/indexer/sink/psql/psql_test.go | 14 +-
 state/indexer/tx/kv/kv.go | 17 +-
 state/indexer/tx/kv/kv_bench_test.go | 6 +-
 state/indexer/tx/kv/kv_test.go | 26 +-
 state/indexer/tx/null/null.go | 2 +-
 state/mocks/block_store.go | 67 ++---
 state/mocks/event_sink.go | 167 -----------
 state/mocks/evidence_pool.go | 22 +-
 state/mocks/store.go | 24 +-
 state/services.go | 39 +--
 state/state.go | 68 +++--
 state/state_test.go | 127 ++++----
 state/store.go | 30 +-
 state/store_test.go | 28 +-
 state/test/factory/block.go | 19 +-
 state/tx_filter.go | 4 +-
 state/tx_filter_test.go | 10 +-
 state/validation.go | 7 +-
 state/validation_test.go | 53 ++--
 store/store.go | 29 +-
 store/store_test.go | 55 ++--
 test/e2e/app/app.go | 2 +-
 test/e2e/app/snapshots.go | 2 +-
 test/e2e/generator/generate.go | 4 +-
 test/e2e/pkg/testnet.go | 4 +-
 test/e2e/runner/benchmark.go | 10 +-
 test/e2e/runner/evidence.go | 65 ++--
 test/e2e/runner/load.go | 12 +-
 test/e2e/runner/rpc.go | 5 +-
 test/e2e/runner/setup.go | 17 +-
 test/e2e/tests/app_test.go | 4 +-
 test/e2e/tests/e2e_test.go | 8 +-
 test/e2e/tests/validator_test.go | 14 +-
 test/fuzz/p2p/addrbook/init-corpus/main.go | 2 +-
 test/fuzz/p2p/pex/init-corpus/main.go | 2 +-
 test/fuzz/p2p/pex/reactor_receive.go | 2 +-
 .../internal/test_harness.go | 19 +-
.../internal/test_harness_test.go | 4 +- types/results.go | 54 ---- types/results_test.go | 54 ---- 283 files changed, 3874 insertions(+), 3906 deletions(-) delete mode 100644 state/mocks/event_sink.go delete mode 100644 types/results.go delete mode 100644 types/results_test.go diff --git a/abci/client/client.go b/abci/client/client.go index b6d34e422..e3b956b68 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -5,9 +5,9 @@ import ( "fmt" "sync" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) const ( @@ -34,34 +34,34 @@ type Client interface { // Asynchronous requests FlushAsync(context.Context) (*ReqRes, error) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) - InfoAsync(context.Context, types.RequestInfo) (*ReqRes, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*ReqRes, error) - CheckTxAsync(context.Context, types.RequestCheckTx) (*ReqRes, error) - QueryAsync(context.Context, types.RequestQuery) (*ReqRes, error) + InfoAsync(context.Context, abci.RequestInfo) (*ReqRes, error) + DeliverTxAsync(context.Context, abci.RequestDeliverTx) (*ReqRes, error) + CheckTxAsync(context.Context, abci.RequestCheckTx) (*ReqRes, error) + QueryAsync(context.Context, abci.RequestQuery) (*ReqRes, error) CommitAsync(context.Context) (*ReqRes, error) - InitChainAsync(context.Context, types.RequestInitChain) (*ReqRes, error) - BeginBlockAsync(context.Context, types.RequestBeginBlock) (*ReqRes, error) - EndBlockAsync(context.Context, types.RequestEndBlock) (*ReqRes, error) - ListSnapshotsAsync(context.Context, types.RequestListSnapshots) (*ReqRes, error) - OfferSnapshotAsync(context.Context, types.RequestOfferSnapshot) (*ReqRes, error) - LoadSnapshotChunkAsync(context.Context, types.RequestLoadSnapshotChunk) (*ReqRes, error) - ApplySnapshotChunkAsync(context.Context, types.RequestApplySnapshotChunk) (*ReqRes, error) + InitChainAsync(context.Context, abci.RequestInitChain) (*ReqRes, error) + BeginBlockAsync(context.Context, abci.RequestBeginBlock) (*ReqRes, error) + EndBlockAsync(context.Context, abci.RequestEndBlock) (*ReqRes, error) + ListSnapshotsAsync(context.Context, abci.RequestListSnapshots) (*ReqRes, error) + OfferSnapshotAsync(context.Context, abci.RequestOfferSnapshot) (*ReqRes, error) + LoadSnapshotChunkAsync(context.Context, abci.RequestLoadSnapshotChunk) (*ReqRes, error) + ApplySnapshotChunkAsync(context.Context, abci.RequestApplySnapshotChunk) (*ReqRes, error) // Synchronous requests FlushSync(context.Context) error - EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - DeliverTxSync(context.Context, types.RequestDeliverTx) (*types.ResponseDeliverTx, error) - CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) - QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) - CommitSync(context.Context) (*types.ResponseCommit, error) - InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) - BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) - ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(context.Context, 
types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) + InfoSync(context.Context, abci.RequestInfo) (*abci.ResponseInfo, error) + DeliverTxSync(context.Context, abci.RequestDeliverTx) (*abci.ResponseDeliverTx, error) + CheckTxSync(context.Context, abci.RequestCheckTx) (*abci.ResponseCheckTx, error) + QuerySync(context.Context, abci.RequestQuery) (*abci.ResponseQuery, error) + CommitSync(context.Context) (*abci.ResponseCommit, error) + InitChainSync(context.Context, abci.RequestInitChain) (*abci.ResponseInitChain, error) + BeginBlockSync(context.Context, abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error) + EndBlockSync(context.Context, abci.RequestEndBlock) (*abci.ResponseEndBlock, error) + ListSnapshotsSync(context.Context, abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) + OfferSnapshotSync(context.Context, abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(context.Context, abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(context.Context, abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) } //---------------------------------------- @@ -80,19 +80,19 @@ func NewClient(addr, transport string, mustConnect bool) (client Client, err err return } -type Callback func(*types.Request, *types.Response) +type Callback func(*abci.Request, *abci.Response) type ReqRes struct { - *types.Request + *abci.Request *sync.WaitGroup - *types.Response // Not set atomically, so be sure to use WaitGroup. + *abci.Response // Not set atomically, so be sure to use WaitGroup. mtx tmsync.RWMutex - done bool // Gets set to true once *after* WaitGroup.Done(). - cb func(*types.Response) // A single callback that may be set. + done bool // Gets set to true once *after* WaitGroup.Done(). + cb func(*abci.Response) // A single callback that may be set. } -func NewReqRes(req *types.Request) *ReqRes { +func NewReqRes(req *abci.Request) *ReqRes { return &ReqRes{ Request: req, WaitGroup: waitGroup1(), @@ -106,7 +106,7 @@ func NewReqRes(req *types.Request) *ReqRes { // Sets sets the callback. If reqRes is already done, it will call the cb // immediately. Note, reqRes.cb should not change if reqRes.done and only one // callback is supported. -func (r *ReqRes) SetCallback(cb func(res *types.Response)) { +func (r *ReqRes) SetCallback(cb func(res *abci.Response)) { r.mtx.Lock() if r.done { @@ -136,7 +136,7 @@ func (r *ReqRes) InvokeCallback() { // will invoke the callback twice and create a potential race condition. 
// // ref: https://github.com/tendermint/tendermint/issues/5439 -func (r *ReqRes) GetCallback() func(*types.Response) { +func (r *ReqRes) GetCallback() func(*abci.Response) { r.mtx.RLock() defer r.mtx.RUnlock() return r.cb diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 31bd6fae1..488275bdd 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -9,10 +9,10 @@ import ( "google.golang.org/grpc" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) // A gRPC client. @@ -20,14 +20,14 @@ type grpcClient struct { service.BaseService mustConnect bool - client types.ABCIApplicationClient + client abci.ABCIApplicationClient conn *grpc.ClientConn chReqRes chan *ReqRes // dispatches "async" responses to callbacks *in order*, needed by mempool mtx tmsync.RWMutex addr string err error - resCb func(*types.Request, *types.Response) // listens to all callbacks + resCb func(*abci.Request, *abci.Response) // listens to all callbacks } var _ Client = (*grpcClient)(nil) @@ -106,12 +106,12 @@ RETRY_LOOP: } cli.Logger.Info("Dialed server. Waiting for echo.", "addr", cli.addr) - client := types.NewABCIApplicationClient(conn) + client := abci.NewABCIApplicationClient(conn) cli.conn = conn ENSURE_CONNECTED: for { - _, err := client.Echo(context.Background(), &types.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) + _, err := client.Echo(context.Background(), &abci.RequestEcho{Message: "hello"}, grpc.WaitForReady(true)) if err == nil { break ENSURE_CONNECTED } @@ -166,143 +166,143 @@ func (cli *grpcClient) SetResponseCallback(resCb Callback) { // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { - req := types.ToRequestEcho(msg) + req := abci.ToRequestEcho(msg) res, err := cli.client.Echo(ctx, req.GetEcho(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Echo{Echo: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Echo{Echo: res}}) } // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) FlushAsync(ctx context.Context) (*ReqRes, error) { - req := types.ToRequestFlush() + req := abci.ToRequestFlush() res, err := cli.client.Flush(ctx, req.GetFlush(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Flush{Flush: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Flush{Flush: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) InfoAsync(ctx context.Context, params types.RequestInfo) (*ReqRes, error) { - req := types.ToRequestInfo(params) +func (cli *grpcClient) InfoAsync(ctx context.Context, params abci.RequestInfo) (*ReqRes, error) { + req := abci.ToRequestInfo(params) res, err := cli.client.Info(ctx, req.GetInfo(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Info{Info: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Info{Info: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) 
DeliverTxAsync(ctx context.Context, params types.RequestDeliverTx) (*ReqRes, error) { - req := types.ToRequestDeliverTx(params) +func (cli *grpcClient) DeliverTxAsync(ctx context.Context, params abci.RequestDeliverTx) (*ReqRes, error) { + req := abci.ToRequestDeliverTx(params) res, err := cli.client.DeliverTx(ctx, req.GetDeliverTx(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_DeliverTx{DeliverTx: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_DeliverTx{DeliverTx: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) CheckTxAsync(ctx context.Context, params types.RequestCheckTx) (*ReqRes, error) { - req := types.ToRequestCheckTx(params) +func (cli *grpcClient) CheckTxAsync(ctx context.Context, params abci.RequestCheckTx) (*ReqRes, error) { + req := abci.ToRequestCheckTx(params) res, err := cli.client.CheckTx(ctx, req.GetCheckTx(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_CheckTx{CheckTx: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_CheckTx{CheckTx: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) QueryAsync(ctx context.Context, params types.RequestQuery) (*ReqRes, error) { - req := types.ToRequestQuery(params) +func (cli *grpcClient) QueryAsync(ctx context.Context, params abci.RequestQuery) (*ReqRes, error) { + req := abci.ToRequestQuery(params) res, err := cli.client.Query(ctx, req.GetQuery(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Query{Query: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Query{Query: res}}) } // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) CommitAsync(ctx context.Context) (*ReqRes, error) { - req := types.ToRequestCommit() + req := abci.ToRequestCommit() res, err := cli.client.Commit(ctx, req.GetCommit(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_Commit{Commit: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_Commit{Commit: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) InitChainAsync(ctx context.Context, params types.RequestInitChain) (*ReqRes, error) { - req := types.ToRequestInitChain(params) +func (cli *grpcClient) InitChainAsync(ctx context.Context, params abci.RequestInitChain) (*ReqRes, error) { + req := abci.ToRequestInitChain(params) res, err := cli.client.InitChain(ctx, req.GetInitChain(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_InitChain{InitChain: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_InitChain{InitChain: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params types.RequestBeginBlock) (*ReqRes, error) { - req := types.ToRequestBeginBlock(params) +func (cli *grpcClient) BeginBlockAsync(ctx context.Context, params abci.RequestBeginBlock) (*ReqRes, error) { + req := abci.ToRequestBeginBlock(params) res, err := cli.client.BeginBlock(ctx, 
req.GetBeginBlock(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_BeginBlock{BeginBlock: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_BeginBlock{BeginBlock: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) EndBlockAsync(ctx context.Context, params types.RequestEndBlock) (*ReqRes, error) { - req := types.ToRequestEndBlock(params) +func (cli *grpcClient) EndBlockAsync(ctx context.Context, params abci.RequestEndBlock) (*ReqRes, error) { + req := abci.ToRequestEndBlock(params) res, err := cli.client.EndBlock(ctx, req.GetEndBlock(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_EndBlock{EndBlock: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params types.RequestListSnapshots) (*ReqRes, error) { - req := types.ToRequestListSnapshots(params) +func (cli *grpcClient) ListSnapshotsAsync(ctx context.Context, params abci.RequestListSnapshots) (*ReqRes, error) { + req := abci.ToRequestListSnapshots(params) res, err := cli.client.ListSnapshots(ctx, req.GetListSnapshots(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_ListSnapshots{ListSnapshots: res}}) } // NOTE: call is synchronous, use ctx to break early if needed -func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params types.RequestOfferSnapshot) (*ReqRes, error) { - req := types.ToRequestOfferSnapshot(params) +func (cli *grpcClient) OfferSnapshotAsync(ctx context.Context, params abci.RequestOfferSnapshot) (*ReqRes, error) { + req := abci.ToRequestOfferSnapshot(params) res, err := cli.client.OfferSnapshot(ctx, req.GetOfferSnapshot(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_OfferSnapshot{OfferSnapshot: res}}) } // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) LoadSnapshotChunkAsync( ctx context.Context, - params types.RequestLoadSnapshotChunk, + params abci.RequestLoadSnapshotChunk, ) (*ReqRes, error) { - req := types.ToRequestLoadSnapshotChunk(params) + req := abci.ToRequestLoadSnapshotChunk(params) res, err := cli.client.LoadSnapshotChunk(ctx, req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) if err != nil { return nil, err } - return cli.finishAsyncCall(ctx, req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) + return cli.finishAsyncCall(ctx, req, &abci.Response{Value: &abci.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) } // NOTE: call is synchronous, use ctx to break early if needed func (cli *grpcClient) ApplySnapshotChunkAsync( ctx context.Context, - params types.RequestApplySnapshotChunk, + params abci.RequestApplySnapshotChunk, ) (*ReqRes, error) { - req := types.ToRequestApplySnapshotChunk(params) + req := abci.ToRequestApplySnapshotChunk(params) res, err := cli.client.ApplySnapshotChunk(ctx, 
req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) if err != nil { return nil, err @@ -310,13 +310,13 @@ func (cli *grpcClient) ApplySnapshotChunkAsync( return cli.finishAsyncCall( ctx, req, - &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}, + &abci.Response{Value: &abci.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}, ) } // finishAsyncCall creates a ReqRes for an async call, and immediately populates it // with the response. We don't complete it until it's been ordered via the channel. -func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, res *types.Response) (*ReqRes, error) { +func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *abci.Request, res *abci.Response) (*ReqRes, error) { reqres := NewReqRes(req) reqres.Response = res select { @@ -330,7 +330,7 @@ func (cli *grpcClient) finishAsyncCall(ctx context.Context, req *types.Request, // finishSyncCall waits for an async call to complete. It is necessary to call all // sync calls asynchronously as well, to maintain call and response ordering via // the channel, and this method will wait until the async call completes. -func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response { +func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *abci.Response { // It's possible that the callback is called twice, since the callback can // be called immediately on SetCallback() in addition to after it has been // set. This is because completing the ReqRes happens in a separate critical @@ -346,8 +346,8 @@ func (cli *grpcClient) finishSyncCall(reqres *ReqRes) *types.Response { // ReqRes should really handle callback dispatch internally, to guarantee // that it's only called once and avoid the above race conditions. var once sync.Once - ch := make(chan *types.Response, 1) - reqres.SetCallback(func(res *types.Response) { + ch := make(chan *abci.Response, 1) + reqres.SetCallback(func(res *abci.Response) { once.Do(func() { ch <- res }) @@ -361,7 +361,7 @@ func (cli *grpcClient) FlushSync(ctx context.Context) error { return nil } -func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) { reqres, err := cli.EchoAsync(ctx, msg) if err != nil { return nil, err @@ -371,8 +371,8 @@ func (cli *grpcClient) EchoSync(ctx context.Context, msg string) (*types.Respons func (cli *grpcClient) InfoSync( ctx context.Context, - req types.RequestInfo, -) (*types.ResponseInfo, error) { + req abci.RequestInfo, +) (*abci.ResponseInfo, error) { reqres, err := cli.InfoAsync(ctx, req) if err != nil { return nil, err @@ -382,8 +382,8 @@ func (cli *grpcClient) InfoSync( func (cli *grpcClient) DeliverTxSync( ctx context.Context, - params types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { + params abci.RequestDeliverTx, +) (*abci.ResponseDeliverTx, error) { reqres, err := cli.DeliverTxAsync(ctx, params) if err != nil { @@ -394,8 +394,8 @@ func (cli *grpcClient) DeliverTxSync( func (cli *grpcClient) CheckTxSync( ctx context.Context, - params types.RequestCheckTx, -) (*types.ResponseCheckTx, error) { + params abci.RequestCheckTx, +) (*abci.ResponseCheckTx, error) { reqres, err := cli.CheckTxAsync(ctx, params) if err != nil { @@ -406,8 +406,8 @@ func (cli *grpcClient) CheckTxSync( func (cli *grpcClient) QuerySync( ctx context.Context, - req types.RequestQuery, -) (*types.ResponseQuery, error) { + req abci.RequestQuery, +) (*abci.ResponseQuery, error) 
{ reqres, err := cli.QueryAsync(ctx, req) if err != nil { return nil, err @@ -415,7 +415,7 @@ func (cli *grpcClient) QuerySync( return cli.finishSyncCall(reqres).GetQuery(), cli.Error() } -func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { +func (cli *grpcClient) CommitSync(ctx context.Context) (*abci.ResponseCommit, error) { reqres, err := cli.CommitAsync(ctx) if err != nil { return nil, err @@ -425,8 +425,8 @@ func (cli *grpcClient) CommitSync(ctx context.Context) (*types.ResponseCommit, e func (cli *grpcClient) InitChainSync( ctx context.Context, - params types.RequestInitChain, -) (*types.ResponseInitChain, error) { + params abci.RequestInitChain, +) (*abci.ResponseInitChain, error) { reqres, err := cli.InitChainAsync(ctx, params) if err != nil { @@ -437,8 +437,8 @@ func (cli *grpcClient) InitChainSync( func (cli *grpcClient) BeginBlockSync( ctx context.Context, - params types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { + params abci.RequestBeginBlock, +) (*abci.ResponseBeginBlock, error) { reqres, err := cli.BeginBlockAsync(ctx, params) if err != nil { @@ -449,8 +449,8 @@ func (cli *grpcClient) BeginBlockSync( func (cli *grpcClient) EndBlockSync( ctx context.Context, - params types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { + params abci.RequestEndBlock, +) (*abci.ResponseEndBlock, error) { reqres, err := cli.EndBlockAsync(ctx, params) if err != nil { @@ -461,8 +461,8 @@ func (cli *grpcClient) EndBlockSync( func (cli *grpcClient) ListSnapshotsSync( ctx context.Context, - params types.RequestListSnapshots, -) (*types.ResponseListSnapshots, error) { + params abci.RequestListSnapshots, +) (*abci.ResponseListSnapshots, error) { reqres, err := cli.ListSnapshotsAsync(ctx, params) if err != nil { @@ -473,8 +473,8 @@ func (cli *grpcClient) ListSnapshotsSync( func (cli *grpcClient) OfferSnapshotSync( ctx context.Context, - params types.RequestOfferSnapshot, -) (*types.ResponseOfferSnapshot, error) { + params abci.RequestOfferSnapshot, +) (*abci.ResponseOfferSnapshot, error) { reqres, err := cli.OfferSnapshotAsync(ctx, params) if err != nil { @@ -485,7 +485,7 @@ func (cli *grpcClient) OfferSnapshotSync( func (cli *grpcClient) LoadSnapshotChunkSync( ctx context.Context, - params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + params abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { reqres, err := cli.LoadSnapshotChunkAsync(ctx, params) if err != nil { @@ -496,7 +496,7 @@ func (cli *grpcClient) LoadSnapshotChunkSync( func (cli *grpcClient) ApplySnapshotChunkSync( ctx context.Context, - params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + params abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { reqres, err := cli.ApplySnapshotChunkAsync(ctx, params) if err != nil { diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 69457b5b0..ba9260542 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -3,9 +3,9 @@ package abcicli import ( "context" - types "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" + types "github.com/tendermint/tendermint/pkg/abci" ) // NOTE: use defer to unlock mutex because Application might panic (e.g., in diff --git a/abci/client/mocks/client.go b/abci/client/mocks/client.go index 6726ce95e..eecae9c60 100644 --- a/abci/client/mocks/client.go +++ 
b/abci/client/mocks/client.go @@ -3,15 +3,14 @@ package mocks import ( - context "context" - abcicli "github.com/tendermint/tendermint/abci/client" + abci "github.com/tendermint/tendermint/pkg/abci" + + context "context" log "github.com/tendermint/tendermint/libs/log" mock "github.com/stretchr/testify/mock" - - types "github.com/tendermint/tendermint/abci/types" ) // Client is an autogenerated mock type for the Client type @@ -20,11 +19,11 @@ type Client struct { } // ApplySnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 abci.RequestApplySnapshotChunk) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestApplySnapshotChunk) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -33,7 +32,7 @@ func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.Request } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestApplySnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -43,20 +42,20 @@ func (_m *Client) ApplySnapshotChunkAsync(_a0 context.Context, _a1 types.Request } // ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseApplySnapshotChunk - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + var r0 *abci.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestApplySnapshotChunk) *abci.ResponseApplySnapshotChunk); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + r0 = ret.Get(0).(*abci.ResponseApplySnapshotChunk) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestApplySnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -66,11 +65,11 @@ func (_m *Client) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestA } // BeginBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlock) (*abcicli.ReqRes, error) { +func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 abci.RequestBeginBlock) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestBeginBlock) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -79,7 +78,7 @@ func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlo } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, 
types.RequestBeginBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestBeginBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -89,20 +88,20 @@ func (_m *Client) BeginBlockAsync(_a0 context.Context, _a1 types.RequestBeginBlo } // BeginBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + var r0 *abci.ResponseBeginBlock + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestBeginBlock) *abci.ResponseBeginBlock); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseBeginBlock) + r0 = ret.Get(0).(*abci.ResponseBeginBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestBeginBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -112,11 +111,11 @@ func (_m *Client) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBloc } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -125,7 +124,7 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (* } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -135,20 +134,20 @@ func (_m *Client) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (* } // CheckTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (_m *Client) CheckTxSync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { + var r0 *abci.ResponseCheckTx + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abci.ResponseCheckTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*abci.ResponseCheckTx) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -181,15 +180,15 @@ func (_m *Client) CommitAsync(_a0 context.Context) (*abcicli.ReqRes, error) { } // CommitSync provides a mock function with given fields: _a0 -func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, 
error) { +func (_m *Client) CommitSync(_a0 context.Context) (*abci.ResponseCommit, error) { ret := _m.Called(_a0) - var r0 *types.ResponseCommit - if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + var r0 *abci.ResponseCommit + if rf, ok := ret.Get(0).(func(context.Context) *abci.ResponseCommit); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*abci.ResponseCommit) } } @@ -204,11 +203,11 @@ func (_m *Client) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 abci.RequestDeliverTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestDeliverTx) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -217,7 +216,7 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestDeliverTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -227,20 +226,20 @@ func (_m *Client) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx } // DeliverTxSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 types.RequestDeliverTx) (*types.ResponseDeliverTx, error) { +func (_m *Client) DeliverTxSync(_a0 context.Context, _a1 abci.RequestDeliverTx) (*abci.ResponseDeliverTx, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseDeliverTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestDeliverTx) *types.ResponseDeliverTx); ok { + var r0 *abci.ResponseDeliverTx + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestDeliverTx) *abci.ResponseDeliverTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseDeliverTx) + r0 = ret.Get(0).(*abci.ResponseDeliverTx) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestDeliverTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -273,15 +272,15 @@ func (_m *Client) EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, e } // EchoSync provides a mock function with given fields: ctx, msg -func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (_m *Client) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) { ret := _m.Called(ctx, msg) - var r0 *types.ResponseEcho - if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { + var r0 *abci.ResponseEcho + if rf, ok := ret.Get(0).(func(context.Context, string) *abci.ResponseEcho); ok { r0 = rf(ctx, msg) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEcho) + r0 = ret.Get(0).(*abci.ResponseEcho) } } @@ -296,11 +295,11 @@ func (_m *Client) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho } // EndBlockAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 
types.RequestEndBlock) (*abcicli.ReqRes, error) { +func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 abci.RequestEndBlock) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestEndBlock) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -309,7 +308,7 @@ func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestEndBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -319,20 +318,20 @@ func (_m *Client) EndBlockAsync(_a0 context.Context, _a1 types.RequestEndBlock) } // EndBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +func (_m *Client) EndBlockSync(_a0 context.Context, _a1 abci.RequestEndBlock) (*abci.ResponseEndBlock, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + var r0 *abci.ResponseEndBlock + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestEndBlock) *abci.ResponseEndBlock); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEndBlock) + r0 = ret.Get(0).(*abci.ResponseEndBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestEndBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -393,11 +392,11 @@ func (_m *Client) FlushSync(_a0 context.Context) error { } // InfoAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicli.ReqRes, error) { +func (_m *Client) InfoAsync(_a0 context.Context, _a1 abci.RequestInfo) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInfo) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -406,7 +405,7 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInfo) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -416,20 +415,20 @@ func (_m *Client) InfoAsync(_a0 context.Context, _a1 types.RequestInfo) (*abcicl } // InfoSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { +func (_m *Client) InfoSync(_a0 context.Context, _a1 abci.RequestInfo) (*abci.ResponseInfo, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseInfo - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok { + var r0 *abci.ResponseInfo + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInfo) *abci.ResponseInfo); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInfo) + r0 = 
ret.Get(0).(*abci.ResponseInfo) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInfo) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -439,11 +438,11 @@ func (_m *Client) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.R } // InitChainAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain) (*abcicli.ReqRes, error) { +func (_m *Client) InitChainAsync(_a0 context.Context, _a1 abci.RequestInitChain) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInitChain) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -452,7 +451,7 @@ func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInitChain) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -462,20 +461,20 @@ func (_m *Client) InitChainAsync(_a0 context.Context, _a1 types.RequestInitChain } // InitChainSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { +func (_m *Client) InitChainSync(_a0 context.Context, _a1 abci.RequestInitChain) (*abci.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseInitChain - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { + var r0 *abci.ResponseInitChain + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInitChain) *abci.ResponseInitChain); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*abci.ResponseInitChain) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInitChain) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -499,11 +498,11 @@ func (_m *Client) IsRunning() bool { } // ListSnapshotsAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListSnapshots) (*abcicli.ReqRes, error) { +func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 abci.RequestListSnapshots) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestListSnapshots) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -512,7 +511,7 @@ func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListS } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestListSnapshots) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -522,20 +521,20 @@ func (_m *Client) ListSnapshotsAsync(_a0 context.Context, _a1 types.RequestListS } // ListSnapshotsSync provides a mock function with given fields: _a0, _a1 -func (_m 
*Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseListSnapshots - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + var r0 *abci.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestListSnapshots) *abci.ResponseListSnapshots); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseListSnapshots) + r0 = ret.Get(0).(*abci.ResponseListSnapshots) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestListSnapshots) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -545,11 +544,11 @@ func (_m *Client) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSn } // LoadSnapshotChunkAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { +func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 abci.RequestLoadSnapshotChunk) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestLoadSnapshotChunk) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -558,7 +557,7 @@ func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestL } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestLoadSnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -568,20 +567,20 @@ func (_m *Client) LoadSnapshotChunkAsync(_a0 context.Context, _a1 types.RequestL } // LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { +func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseLoadSnapshotChunk - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + var r0 *abci.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestLoadSnapshotChunk) *abci.ResponseLoadSnapshotChunk); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + r0 = ret.Get(0).(*abci.ResponseLoadSnapshotChunk) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestLoadSnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -591,11 +590,11 @@ func (_m *Client) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLo } // OfferSnapshotAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOfferSnapshot) 
(*abcicli.ReqRes, error) { +func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 abci.RequestOfferSnapshot) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestOfferSnapshot) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -604,7 +603,7 @@ func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOffer } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestOfferSnapshot) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -614,20 +613,20 @@ func (_m *Client) OfferSnapshotAsync(_a0 context.Context, _a1 types.RequestOffer } // OfferSnapshotSync provides a mock function with given fields: _a0, _a1 -func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (_m *Client) OfferSnapshotSync(_a0 context.Context, _a1 abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseOfferSnapshot - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + var r0 *abci.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestOfferSnapshot) *abci.ResponseOfferSnapshot); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + r0 = ret.Get(0).(*abci.ResponseOfferSnapshot) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestOfferSnapshot) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -670,11 +669,11 @@ func (_m *Client) OnStop() { } // QueryAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abcicli.ReqRes, error) { +func (_m *Client) QueryAsync(_a0 context.Context, _a1 abci.RequestQuery) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestQuery) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -683,7 +682,7 @@ func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abci } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestQuery) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -693,20 +692,20 @@ func (_m *Client) QueryAsync(_a0 context.Context, _a1 types.RequestQuery) (*abci } // QuerySync provides a mock function with given fields: _a0, _a1 -func (_m *Client) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { +func (_m *Client) QuerySync(_a0 context.Context, _a1 abci.RequestQuery) (*abci.ResponseQuery, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseQuery - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok { + var r0 *abci.ResponseQuery + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestQuery) *abci.ResponseQuery); ok { r0 = rf(_a0, _a1) } else { if 
ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseQuery) + r0 = ret.Get(0).(*abci.ResponseQuery) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestQuery) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 3fef8540d..b6f5777e0 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -11,11 +11,11 @@ import ( "reflect" "time" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/libs/timer" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) const ( @@ -45,8 +45,8 @@ type socketClient struct { mtx tmsync.RWMutex err error - reqSent *list.List // list of requests sent, waiting for response - resCb func(*types.Request, *types.Response) // called on all requests, if set. + reqSent *list.List // list of requests sent, waiting for response + resCb func(*abci.Request, *abci.Response) // called on all requests, if set. } var _ Client = (*socketClient)(nil) @@ -138,14 +138,14 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { } cli.willSendReq(reqres.R) - err := types.WriteMessage(reqres.R.Request, w) + err := abci.WriteMessage(reqres.R.Request, w) if err != nil { cli.stopForError(fmt.Errorf("write to buffer: %w", err)) return } // If it's a flush request, flush the current buffer. - if _, ok := reqres.R.Request.Value.(*types.Request_Flush); ok { + if _, ok := reqres.R.Request.Value.(*abci.Request_Flush); ok { err = w.Flush() if err != nil { cli.stopForError(fmt.Errorf("flush buffer: %w", err)) @@ -154,7 +154,7 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { } case <-cli.flushTimer.Ch: // flush queue select { - case cli.reqQueue <- &reqResWithContext{R: NewReqRes(types.ToRequestFlush()), C: context.Background()}: + case cli.reqQueue <- &reqResWithContext{R: NewReqRes(abci.ToRequestFlush()), C: context.Background()}: default: // Probably will fill the buffer, or retry later. } @@ -167,8 +167,8 @@ func (cli *socketClient) sendRequestsRoutine(conn io.Writer) { func (cli *socketClient) recvResponseRoutine(conn io.Reader) { r := bufio.NewReader(conn) for { - var res = &types.Response{} - err := types.ReadMessage(r, res) + var res = &abci.Response{} + err := abci.ReadMessage(r, res) if err != nil { cli.stopForError(fmt.Errorf("read message: %w", err)) return @@ -177,7 +177,7 @@ func (cli *socketClient) recvResponseRoutine(conn io.Reader) { // cli.Logger.Debug("Received response", "responseType", reflect.TypeOf(res), "response", res) switch r := res.Value.(type) { - case *types.Response_Exception: // app responded with error + case *abci.Response_Exception: // app responded with error // XXX After setting cli.err, release waiters (e.g. 
reqres.Done()) cli.stopForError(errors.New(r.Exception.Error)) return @@ -197,7 +197,7 @@ func (cli *socketClient) willSendReq(reqres *ReqRes) { cli.reqSent.PushBack(reqres) } -func (cli *socketClient) didRecvResponse(res *types.Response) error { +func (cli *socketClient) didRecvResponse(res *abci.Response) error { cli.mtx.Lock() defer cli.mtx.Unlock() @@ -234,71 +234,71 @@ func (cli *socketClient) didRecvResponse(res *types.Response) error { //---------------------------------------- func (cli *socketClient) EchoAsync(ctx context.Context, msg string) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestEcho(msg)) + return cli.queueRequestAsync(ctx, abci.ToRequestEcho(msg)) } func (cli *socketClient) FlushAsync(ctx context.Context) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestFlush()) + return cli.queueRequestAsync(ctx, abci.ToRequestFlush()) } -func (cli *socketClient) InfoAsync(ctx context.Context, req types.RequestInfo) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestInfo(req)) +func (cli *socketClient) InfoAsync(ctx context.Context, req abci.RequestInfo) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestInfo(req)) } -func (cli *socketClient) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestDeliverTx(req)) +func (cli *socketClient) DeliverTxAsync(ctx context.Context, req abci.RequestDeliverTx) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestDeliverTx(req)) } -func (cli *socketClient) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestCheckTx(req)) +func (cli *socketClient) CheckTxAsync(ctx context.Context, req abci.RequestCheckTx) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestCheckTx(req)) } -func (cli *socketClient) QueryAsync(ctx context.Context, req types.RequestQuery) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestQuery(req)) +func (cli *socketClient) QueryAsync(ctx context.Context, req abci.RequestQuery) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestQuery(req)) } func (cli *socketClient) CommitAsync(ctx context.Context) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestCommit()) + return cli.queueRequestAsync(ctx, abci.ToRequestCommit()) } -func (cli *socketClient) InitChainAsync(ctx context.Context, req types.RequestInitChain) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestInitChain(req)) +func (cli *socketClient) InitChainAsync(ctx context.Context, req abci.RequestInitChain) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestInitChain(req)) } -func (cli *socketClient) BeginBlockAsync(ctx context.Context, req types.RequestBeginBlock) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestBeginBlock(req)) +func (cli *socketClient) BeginBlockAsync(ctx context.Context, req abci.RequestBeginBlock) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestBeginBlock(req)) } -func (cli *socketClient) EndBlockAsync(ctx context.Context, req types.RequestEndBlock) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestEndBlock(req)) +func (cli *socketClient) EndBlockAsync(ctx context.Context, req abci.RequestEndBlock) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestEndBlock(req)) } -func (cli *socketClient) ListSnapshotsAsync(ctx 
context.Context, req types.RequestListSnapshots) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestListSnapshots(req)) +func (cli *socketClient) ListSnapshotsAsync(ctx context.Context, req abci.RequestListSnapshots) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestListSnapshots(req)) } -func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req types.RequestOfferSnapshot) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestOfferSnapshot(req)) +func (cli *socketClient) OfferSnapshotAsync(ctx context.Context, req abci.RequestOfferSnapshot) (*ReqRes, error) { + return cli.queueRequestAsync(ctx, abci.ToRequestOfferSnapshot(req)) } func (cli *socketClient) LoadSnapshotChunkAsync( ctx context.Context, - req types.RequestLoadSnapshotChunk, + req abci.RequestLoadSnapshotChunk, ) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestLoadSnapshotChunk(req)) + return cli.queueRequestAsync(ctx, abci.ToRequestLoadSnapshotChunk(req)) } func (cli *socketClient) ApplySnapshotChunkAsync( ctx context.Context, - req types.RequestApplySnapshotChunk, + req abci.RequestApplySnapshotChunk, ) (*ReqRes, error) { - return cli.queueRequestAsync(ctx, types.ToRequestApplySnapshotChunk(req)) + return cli.queueRequestAsync(ctx, abci.ToRequestApplySnapshotChunk(req)) } //---------------------------------------- func (cli *socketClient) FlushSync(ctx context.Context) error { - reqRes, err := cli.queueRequest(ctx, types.ToRequestFlush(), true) + reqRes, err := cli.queueRequest(ctx, abci.ToRequestFlush(), true) if err != nil { return queueErr(err) } @@ -322,8 +322,8 @@ func (cli *socketClient) FlushSync(ctx context.Context) error { } } -func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEcho(msg)) +func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestEcho(msg)) if err != nil { return nil, err } @@ -332,9 +332,9 @@ func (cli *socketClient) EchoSync(ctx context.Context, msg string) (*types.Respo func (cli *socketClient) InfoSync( ctx context.Context, - req types.RequestInfo, -) (*types.ResponseInfo, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInfo(req)) + req abci.RequestInfo, +) (*abci.ResponseInfo, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestInfo(req)) if err != nil { return nil, err } @@ -343,10 +343,10 @@ func (cli *socketClient) InfoSync( func (cli *socketClient) DeliverTxSync( ctx context.Context, - req types.RequestDeliverTx, -) (*types.ResponseDeliverTx, error) { + req abci.RequestDeliverTx, +) (*abci.ResponseDeliverTx, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestDeliverTx(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestDeliverTx(req)) if err != nil { return nil, err } @@ -355,9 +355,9 @@ func (cli *socketClient) DeliverTxSync( func (cli *socketClient) CheckTxSync( ctx context.Context, - req types.RequestCheckTx, -) (*types.ResponseCheckTx, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCheckTx(req)) + req abci.RequestCheckTx, +) (*abci.ResponseCheckTx, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestCheckTx(req)) if err != nil { return nil, err } @@ -366,17 +366,17 @@ func (cli *socketClient) CheckTxSync( func (cli *socketClient) QuerySync( ctx 
context.Context, - req types.RequestQuery, -) (*types.ResponseQuery, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestQuery(req)) + req abci.RequestQuery, +) (*abci.ResponseQuery, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestQuery(req)) if err != nil { return nil, err } return reqres.Response.GetQuery(), nil } -func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestCommit()) +func (cli *socketClient) CommitSync(ctx context.Context) (*abci.ResponseCommit, error) { + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestCommit()) if err != nil { return nil, err } @@ -385,10 +385,10 @@ func (cli *socketClient) CommitSync(ctx context.Context) (*types.ResponseCommit, func (cli *socketClient) InitChainSync( ctx context.Context, - req types.RequestInitChain, -) (*types.ResponseInitChain, error) { + req abci.RequestInitChain, +) (*abci.ResponseInitChain, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestInitChain(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestInitChain(req)) if err != nil { return nil, err } @@ -397,10 +397,10 @@ func (cli *socketClient) InitChainSync( func (cli *socketClient) BeginBlockSync( ctx context.Context, - req types.RequestBeginBlock, -) (*types.ResponseBeginBlock, error) { + req abci.RequestBeginBlock, +) (*abci.ResponseBeginBlock, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestBeginBlock(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestBeginBlock(req)) if err != nil { return nil, err } @@ -409,10 +409,10 @@ func (cli *socketClient) BeginBlockSync( func (cli *socketClient) EndBlockSync( ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { + req abci.RequestEndBlock, +) (*abci.ResponseEndBlock, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestEndBlock(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestEndBlock(req)) if err != nil { return nil, err } @@ -421,10 +421,10 @@ func (cli *socketClient) EndBlockSync( func (cli *socketClient) ListSnapshotsSync( ctx context.Context, - req types.RequestListSnapshots, -) (*types.ResponseListSnapshots, error) { + req abci.RequestListSnapshots, +) (*abci.ResponseListSnapshots, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestListSnapshots(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestListSnapshots(req)) if err != nil { return nil, err } @@ -433,10 +433,10 @@ func (cli *socketClient) ListSnapshotsSync( func (cli *socketClient) OfferSnapshotSync( ctx context.Context, - req types.RequestOfferSnapshot, -) (*types.ResponseOfferSnapshot, error) { + req abci.RequestOfferSnapshot, +) (*abci.ResponseOfferSnapshot, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestOfferSnapshot(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestOfferSnapshot(req)) if err != nil { return nil, err } @@ -445,9 +445,9 @@ func (cli *socketClient) OfferSnapshotSync( func (cli *socketClient) LoadSnapshotChunkSync( ctx context.Context, - req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + req abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestLoadSnapshotChunk(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, 
abci.ToRequestLoadSnapshotChunk(req)) if err != nil { return nil, err } @@ -456,9 +456,9 @@ func (cli *socketClient) LoadSnapshotChunkSync( func (cli *socketClient) ApplySnapshotChunkSync( ctx context.Context, - req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + req abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { - reqres, err := cli.queueRequestAndFlushSync(ctx, types.ToRequestApplySnapshotChunk(req)) + reqres, err := cli.queueRequestAndFlushSync(ctx, abci.ToRequestApplySnapshotChunk(req)) if err != nil { return nil, err } @@ -475,7 +475,7 @@ func (cli *socketClient) ApplySnapshotChunkSync( // non-nil). // // The caller is responsible for checking cli.Error. -func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, sync bool) (*ReqRes, error) { +func (cli *socketClient) queueRequest(ctx context.Context, req *abci.Request, sync bool) (*ReqRes, error) { reqres := NewReqRes(req) if sync { @@ -494,7 +494,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s // Maybe auto-flush, or unset auto-flush switch req.Value.(type) { - case *types.Request_Flush: + case *abci.Request_Flush: cli.flushTimer.Unset() default: cli.flushTimer.Set() @@ -505,7 +505,7 @@ func (cli *socketClient) queueRequest(ctx context.Context, req *types.Request, s func (cli *socketClient) queueRequestAsync( ctx context.Context, - req *types.Request, + req *abci.Request, ) (*ReqRes, error) { reqres, err := cli.queueRequest(ctx, req, false) @@ -518,7 +518,7 @@ func (cli *socketClient) queueRequestAsync( func (cli *socketClient) queueRequestAndFlushSync( ctx context.Context, - req *types.Request, + req *abci.Request, ) (*ReqRes, error) { reqres, err := cli.queueRequest(ctx, req, true) @@ -561,36 +561,36 @@ LOOP: //---------------------------------------- -func resMatchesReq(req *types.Request, res *types.Response) (ok bool) { +func resMatchesReq(req *abci.Request, res *abci.Response) (ok bool) { switch req.Value.(type) { - case *types.Request_Echo: - _, ok = res.Value.(*types.Response_Echo) - case *types.Request_Flush: - _, ok = res.Value.(*types.Response_Flush) - case *types.Request_Info: - _, ok = res.Value.(*types.Response_Info) - case *types.Request_DeliverTx: - _, ok = res.Value.(*types.Response_DeliverTx) - case *types.Request_CheckTx: - _, ok = res.Value.(*types.Response_CheckTx) - case *types.Request_Commit: - _, ok = res.Value.(*types.Response_Commit) - case *types.Request_Query: - _, ok = res.Value.(*types.Response_Query) - case *types.Request_InitChain: - _, ok = res.Value.(*types.Response_InitChain) - case *types.Request_BeginBlock: - _, ok = res.Value.(*types.Response_BeginBlock) - case *types.Request_EndBlock: - _, ok = res.Value.(*types.Response_EndBlock) - case *types.Request_ApplySnapshotChunk: - _, ok = res.Value.(*types.Response_ApplySnapshotChunk) - case *types.Request_LoadSnapshotChunk: - _, ok = res.Value.(*types.Response_LoadSnapshotChunk) - case *types.Request_ListSnapshots: - _, ok = res.Value.(*types.Response_ListSnapshots) - case *types.Request_OfferSnapshot: - _, ok = res.Value.(*types.Response_OfferSnapshot) + case *abci.Request_Echo: + _, ok = res.Value.(*abci.Response_Echo) + case *abci.Request_Flush: + _, ok = res.Value.(*abci.Response_Flush) + case *abci.Request_Info: + _, ok = res.Value.(*abci.Response_Info) + case *abci.Request_DeliverTx: + _, ok = res.Value.(*abci.Response_DeliverTx) + case *abci.Request_CheckTx: + _, ok = res.Value.(*abci.Response_CheckTx) + case 
*abci.Request_Commit: + _, ok = res.Value.(*abci.Response_Commit) + case *abci.Request_Query: + _, ok = res.Value.(*abci.Response_Query) + case *abci.Request_InitChain: + _, ok = res.Value.(*abci.Response_InitChain) + case *abci.Request_BeginBlock: + _, ok = res.Value.(*abci.Response_BeginBlock) + case *abci.Request_EndBlock: + _, ok = res.Value.(*abci.Response_EndBlock) + case *abci.Request_ApplySnapshotChunk: + _, ok = res.Value.(*abci.Response_ApplySnapshotChunk) + case *abci.Request_LoadSnapshotChunk: + _, ok = res.Value.(*abci.Response_LoadSnapshotChunk) + case *abci.Request_ListSnapshots: + _, ok = res.Value.(*abci.Response_ListSnapshots) + case *abci.Request_OfferSnapshot: + _, ok = res.Value.(*abci.Response_OfferSnapshot) } return ok } diff --git a/abci/client/socket_client_test.go b/abci/client/socket_client_test.go index d61d729e1..23736b7b4 100644 --- a/abci/client/socket_client_test.go +++ b/abci/client/socket_client_test.go @@ -13,8 +13,8 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) var ctx = context.Background() @@ -37,7 +37,7 @@ func TestProperSyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { // This is BeginBlockSync unrolled.... - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) + reqres, err := c.BeginBlockAsync(ctx, abci.RequestBeginBlock{}) assert.NoError(t, err) err = c.FlushSync(context.Background()) assert.NoError(t, err) @@ -73,7 +73,7 @@ func TestHangingSyncCalls(t *testing.T) { resp := make(chan error, 1) go func() { // Start BeginBlock and flush it - reqres, err := c.BeginBlockAsync(ctx, types.RequestBeginBlock{}) + reqres, err := c.BeginBlockAsync(ctx, abci.RequestBeginBlock{}) assert.NoError(t, err) flush, err := c.FlushAsync(ctx) assert.NoError(t, err) @@ -99,7 +99,7 @@ func TestHangingSyncCalls(t *testing.T) { } } -func setupClientServer(t *testing.T, app types.Application) ( +func setupClientServer(t *testing.T, app abci.Application) ( service.Service, abcicli.Client) { // some port between 20k and 30k port := 20000 + rand.Int31()%10000 @@ -118,10 +118,10 @@ func setupClientServer(t *testing.T, app types.Application) ( } type slowApp struct { - types.BaseApplication + abci.BaseApplication } -func (slowApp) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { +func (slowApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { time.Sleep(200 * time.Millisecond) - return types.ResponseBeginBlock{} + return abci.ResponseBeginBlock{} } diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b9af27e22..e21c8bee1 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -20,8 +20,8 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" servertest "github.com/tendermint/tendermint/abci/tests/server" - "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/abci/version" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/proto/tendermint/crypto" ) @@ -459,7 +459,7 @@ func cmdInfo(cmd *cobra.Command, args []string) error { if len(args) == 1 { version = args[0] } - res, err := client.InfoSync(ctx, types.RequestInfo{Version: version}) + res, err := client.InfoSync(ctx, abci.RequestInfo{Version: version}) if err != nil { return err } @@ -484,7 
+484,7 @@ func cmdDeliverTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, err := client.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) if err != nil { return err } @@ -510,7 +510,7 @@ func cmdCheckTx(cmd *cobra.Command, args []string) error { if err != nil { return err } - res, err := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, err := client.CheckTxSync(ctx, abci.RequestCheckTx{Tx: txBytes}) if err != nil { return err } @@ -550,7 +550,7 @@ func cmdQuery(cmd *cobra.Command, args []string) error { return err } - resQuery, err := client.QuerySync(ctx, types.RequestQuery{ + resQuery, err := client.QuerySync(ctx, abci.RequestQuery{ Data: queryBytes, Path: flagPath, Height: int64(flagHeight), @@ -577,7 +577,7 @@ func cmdKVStore(cmd *cobra.Command, args []string) error { logger := log.MustNewDefaultLogger(log.LogFormatPlain, log.LogLevelInfo, false) // Create the application - in memory or persisted to disk - var app types.Application + var app abci.Application if flagPersist == "" { app = kvstore.NewApplication() } else { @@ -616,7 +616,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) { } // Always print the status code. - if rsp.Code == types.CodeTypeOK { + if rsp.Code == abci.CodeTypeOK { fmt.Printf("-> code: OK\n") } else { fmt.Printf("-> code: %d\n", rsp.Code) diff --git a/abci/example/example_test.go b/abci/example/example_test.go index fdfc5515e..96b177858 100644 --- a/abci/example/example_test.go +++ b/abci/example/example_test.go @@ -21,7 +21,7 @@ import ( "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" ) func init() { @@ -35,15 +35,15 @@ func TestKVStore(t *testing.T) { func TestBaseApp(t *testing.T) { fmt.Println("### Testing BaseApp") - testStream(t, types.NewBaseApplication()) + testStream(t, abci.NewBaseApplication()) } func TestGRPC(t *testing.T) { fmt.Println("### Testing GRPC") - testGRPCSync(t, types.NewGRPCApplication(types.NewBaseApplication())) + testGRPCSync(t, abci.NewGRPCApplication(abci.NewBaseApplication())) } -func testStream(t *testing.T, app types.Application) { +func testStream(t *testing.T, app abci.Application) { const numDeliverTxs = 20000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) @@ -73,10 +73,10 @@ func testStream(t *testing.T, app types.Application) { done := make(chan struct{}) counter := 0 - client.SetResponseCallback(func(req *types.Request, res *types.Response) { + client.SetResponseCallback(func(req *abci.Request, res *abci.Response) { // Process response switch r := res.Value.(type) { - case *types.Response_DeliverTx: + case *abci.Response_DeliverTx: counter++ if r.DeliverTx.Code != code.CodeTypeOK { t.Error("DeliverTx failed with ret_code", r.DeliverTx.Code) @@ -91,7 +91,7 @@ func testStream(t *testing.T, app types.Application) { }() return } - case *types.Response_Flush: + case *abci.Response_Flush: // ignore default: t.Error("Unexpected response type", reflect.TypeOf(res.Value)) @@ -103,7 +103,7 @@ func testStream(t *testing.T, app types.Application) { // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request - _, err = client.DeliverTxAsync(ctx, types.RequestDeliverTx{Tx: []byte("test")}) + _, err = 
client.DeliverTxAsync(ctx, abci.RequestDeliverTx{Tx: []byte("test")}) require.NoError(t, err) // Sometimes send flush messages @@ -127,7 +127,7 @@ func dialerFunc(ctx context.Context, addr string) (net.Conn, error) { return tmnet.Connect(addr) } -func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { +func testGRPCSync(t *testing.T, app abci.ABCIApplicationServer) { numDeliverTxs := 2000 socketFile := fmt.Sprintf("test-%08x.sock", rand.Int31n(1<<30)) defer os.Remove(socketFile) @@ -158,12 +158,12 @@ func testGRPCSync(t *testing.T, app types.ABCIApplicationServer) { } }) - client := types.NewABCIApplicationClient(conn) + client := abci.NewABCIApplicationClient(conn) // Write requests for counter := 0; counter < numDeliverTxs; counter++ { // Send request - response, err := client.DeliverTx(context.Background(), &types.RequestDeliverTx{Tx: []byte("test")}) + response, err := client.DeliverTx(context.Background(), &abci.RequestDeliverTx{Tx: []byte("test")}) if err != nil { t.Fatalf("Error in GRPC DeliverTx: %v", err.Error()) } diff --git a/abci/example/kvstore/helpers.go b/abci/example/kvstore/helpers.go index 38bb42ea8..54b0a83ad 100644 --- a/abci/example/kvstore/helpers.go +++ b/abci/example/kvstore/helpers.go @@ -3,17 +3,17 @@ package kvstore import ( mrand "math/rand" - "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" ) // RandVal creates one random validator, with a key derived // from the input value -func RandVal(i int) types.ValidatorUpdate { +func RandVal(i int) abci.ValidatorUpdate { pubkey := tmrand.Bytes(32) // Random value between [0, 2^16 - 1] power := mrand.Uint32() & (1<<16 - 1) // nolint:gosec // G404: Use of weak random number generator - v := types.UpdateValidator(pubkey, int64(power), "") + v := abci.UpdateValidator(pubkey, int64(power), "") return v } @@ -21,8 +21,8 @@ func RandVal(i int) types.ValidatorUpdate { // the application. 
Note that the keys are deterministically // derived from the index in the array, while the power is // random (Change this if not desired) -func RandVals(cnt int) []types.ValidatorUpdate { - res := make([]types.ValidatorUpdate, cnt) +func RandVals(cnt int) []abci.ValidatorUpdate { + res := make([]abci.ValidatorUpdate, cnt) for i := 0; i < cnt; i++ { res[i] = RandVal(i) } @@ -33,7 +33,7 @@ func RandVals(cnt int) []types.ValidatorUpdate { // which allows tests to pass and is fine as long as you // don't make any tx that modify the validator state func InitKVStore(app *PersistentKVStoreApplication) { - app.InitChain(types.RequestInitChain{ + app.InitChain(abci.RequestInitChain{ Validators: RandVals(1), }) } diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 97256c8ac..4785f255e 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -9,7 +9,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" - "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/version" ) @@ -61,10 +61,10 @@ func prefixKey(key []byte) []byte { //--------------------------------------------------- -var _ types.Application = (*Application)(nil) +var _ abci.Application = (*Application)(nil) type Application struct { - types.BaseApplication + abci.BaseApplication state State RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight) @@ -75,8 +75,8 @@ func NewApplication() *Application { return &Application{state: state} } -func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) { - return types.ResponseInfo{ +func (app *Application) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { + return abci.ResponseInfo{ Data: fmt.Sprintf("{\"size\":%v}", app.state.Size), Version: version.ABCIVersion, AppVersion: ProtocolVersion, @@ -86,7 +86,7 @@ func (app *Application) Info(req types.RequestInfo) (resInfo types.ResponseInfo) } // tx is either "key=value" or just arbitrary bytes -func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { +func (app *Application) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { var key, value string parts := bytes.Split(req.Tx, []byte("=")) @@ -102,10 +102,10 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli } app.state.Size++ - events := []types.Event{ + events := []abci.Event{ { Type: "app", - Attributes: []types.EventAttribute{ + Attributes: []abci.EventAttribute{ {Key: "creator", Value: "Cosmoshi Netowoko", Index: true}, {Key: "key", Value: key, Index: true}, {Key: "index_key", Value: "index is working", Index: true}, @@ -114,14 +114,14 @@ func (app *Application) DeliverTx(req types.RequestDeliverTx) types.ResponseDeli }, } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} + return abci.ResponseDeliverTx{Code: code.CodeTypeOK, Events: events} } -func (app *Application) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { - return types.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} +func (app *Application) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { + return abci.ResponseCheckTx{Code: code.CodeTypeOK, GasWanted: 1} } -func (app *Application) Commit() types.ResponseCommit { +func (app *Application) Commit() abci.ResponseCommit { // Using a memdb - just return the big endian size of the db appHash := make([]byte, 8) binary.PutVarint(appHash, 
app.state.Size) @@ -129,7 +129,7 @@ func (app *Application) Commit() types.ResponseCommit { app.state.Height++ saveState(app.state) - resp := types.ResponseCommit{Data: appHash} + resp := abci.ResponseCommit{Data: appHash} if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 } @@ -137,7 +137,7 @@ func (app *Application) Commit() types.ResponseCommit { } // Returns an associated value or nil if missing. -func (app *Application) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { +func (app *Application) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { if reqQuery.Prove { value, err := app.state.db.Get(prefixKey(reqQuery.Data)) if err != nil { diff --git a/abci/example/kvstore/kvstore_test.go b/abci/example/kvstore/kvstore_test.go index a52312a00..34c012709 100644 --- a/abci/example/kvstore/kvstore_test.go +++ b/abci/example/kvstore/kvstore_test.go @@ -15,7 +15,7 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/code" abciserver "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" ) @@ -26,8 +26,8 @@ const ( var ctx = context.Background() -func testKVStore(t *testing.T, app types.Application, tx []byte, key, value string) { - req := types.RequestDeliverTx{Tx: tx} +func testKVStore(t *testing.T, app abci.Application, tx []byte, key, value string) { + req := abci.RequestDeliverTx{Tx: tx} ar := app.DeliverTx(req) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error @@ -36,11 +36,11 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri // commit app.Commit() - info := app.Info(types.RequestInfo{}) + info := app.Info(abci.RequestInfo{}) require.NotZero(t, info.LastBlockHeight) // make sure query is fine - resQuery := app.Query(types.RequestQuery{ + resQuery := app.Query(abci.RequestQuery{ Path: "/store", Data: []byte(key), }) @@ -50,7 +50,7 @@ func testKVStore(t *testing.T, app types.Application, tx []byte, key, value stri require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery = app.Query(types.RequestQuery{ + resQuery = app.Query(abci.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, @@ -98,7 +98,7 @@ func TestPersistentKVStoreInfo(t *testing.T) { InitKVStore(kvstore) height := int64(0) - resInfo := kvstore.Info(types.RequestInfo{}) + resInfo := kvstore.Info(abci.RequestInfo{}) if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) } @@ -109,11 +109,11 @@ func TestPersistentKVStoreInfo(t *testing.T) { header := tmproto.Header{ Height: height, } - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) - kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) + kvstore.BeginBlock(abci.RequestBeginBlock{Hash: hash, Header: header}) + kvstore.EndBlock(abci.RequestEndBlock{Height: header.Height}) kvstore.Commit() - resInfo = kvstore.Info(types.RequestInfo{}) + resInfo = kvstore.Info(abci.RequestInfo{}) if resInfo.LastBlockHeight != height { t.Fatalf("expected height of %d, got %d", height, resInfo.LastBlockHeight) } @@ -133,18 +133,18 @@ func TestValUpdates(t *testing.T) { nInit := 5 vals := RandVals(total) // initialize with the first nInit - kvstore.InitChain(types.RequestInitChain{ + 
kvstore.InitChain(abci.RequestInitChain{ Validators: vals[:nInit], }) vals1, vals2 := vals[:nInit], kvstore.Validators() valsEqual(t, vals1, vals2) - var v1, v2, v3 types.ValidatorUpdate + var v1, v2, v3 abci.ValidatorUpdate // add some validators v1, v2 = vals[nInit], vals[nInit+1] - diff := []types.ValidatorUpdate{v1, v2} + diff := []abci.ValidatorUpdate{v1, v2} tx1 := MakeValSetChangeTx(v1.PubKey, v1.Power) tx2 := MakeValSetChangeTx(v2.PubKey, v2.Power) @@ -158,7 +158,7 @@ func TestValUpdates(t *testing.T) { v1.Power = 0 v2.Power = 0 v3.Power = 0 - diff = []types.ValidatorUpdate{v1, v2, v3} + diff = []abci.ValidatorUpdate{v1, v2, v3} tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) tx2 = MakeValSetChangeTx(v2.PubKey, v2.Power) tx3 := MakeValSetChangeTx(v3.PubKey, v3.Power) @@ -176,12 +176,12 @@ func TestValUpdates(t *testing.T) { } else { v1.Power = 5 } - diff = []types.ValidatorUpdate{v1} + diff = []abci.ValidatorUpdate{v1} tx1 = MakeValSetChangeTx(v1.PubKey, v1.Power) makeApplyBlock(t, kvstore, 3, diff, tx1) - vals1 = append([]types.ValidatorUpdate{v1}, vals1[1:]...) + vals1 = append([]abci.ValidatorUpdate{v1}, vals1[1:]...) vals2 = kvstore.Validators() valsEqual(t, vals1, vals2) @@ -189,9 +189,9 @@ func TestValUpdates(t *testing.T) { func makeApplyBlock( t *testing.T, - kvstore types.Application, + kvstore abci.Application, heightInt int, - diff []types.ValidatorUpdate, + diff []abci.ValidatorUpdate, txs ...[]byte) { // make and apply block height := int64(heightInt) @@ -200,13 +200,13 @@ func makeApplyBlock( Height: height, } - kvstore.BeginBlock(types.RequestBeginBlock{Hash: hash, Header: header}) + kvstore.BeginBlock(abci.RequestBeginBlock{Hash: hash, Header: header}) for _, tx := range txs { - if r := kvstore.DeliverTx(types.RequestDeliverTx{Tx: tx}); r.IsErr() { + if r := kvstore.DeliverTx(abci.RequestDeliverTx{Tx: tx}); r.IsErr() { t.Fatal(r) } } - resEndBlock := kvstore.EndBlock(types.RequestEndBlock{Height: header.Height}) + resEndBlock := kvstore.EndBlock(abci.RequestEndBlock{Height: header.Height}) kvstore.Commit() valsEqual(t, diff, resEndBlock.ValidatorUpdates) @@ -214,12 +214,12 @@ func makeApplyBlock( } // order doesn't matter -func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { +func valsEqual(t *testing.T, vals1, vals2 []abci.ValidatorUpdate) { if len(vals1) != len(vals2) { t.Fatalf("vals dont match in len. 
got %d, expected %d", len(vals2), len(vals1)) } - sort.Sort(types.ValidatorUpdates(vals1)) - sort.Sort(types.ValidatorUpdates(vals2)) + sort.Sort(abci.ValidatorUpdates(vals1)) + sort.Sort(abci.ValidatorUpdates(vals2)) for i, v1 := range vals1 { v2 := vals2[i] if !v1.PubKey.Equal(v2.PubKey) || @@ -229,7 +229,7 @@ func valsEqual(t *testing.T, vals1, vals2 []types.ValidatorUpdate) { } } -func makeSocketClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeSocketClientServer(app abci.Application, name string) (abcicli.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() @@ -253,12 +253,12 @@ func makeSocketClientServer(app types.Application, name string) (abcicli.Client, return client, server, nil } -func makeGRPCClientServer(app types.Application, name string) (abcicli.Client, service.Service, error) { +func makeGRPCClientServer(app abci.Application, name string) (abcicli.Client, service.Service, error) { // Start the listener socket := fmt.Sprintf("unix://%s.sock", name) logger := log.TestingLogger() - gapp := types.NewGRPCApplication(app) + gapp := abci.NewGRPCApplication(app) server := abciserver.NewGRPCServer(socket, gapp) server.SetLogger(logger.With("module", "abci-server")) if err := server.Start(); err != nil { @@ -326,23 +326,23 @@ func runClientTests(t *testing.T, client abcicli.Client) { } func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) { - ar, err := app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) + ar, err := app.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // repeating tx doesn't raise error - ar, err = app.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: tx}) + ar, err = app.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: tx}) require.NoError(t, err) require.False(t, ar.IsErr(), ar) // commit _, err = app.CommitSync(ctx) require.NoError(t, err) - info, err := app.InfoSync(ctx, types.RequestInfo{}) + info, err := app.InfoSync(ctx, abci.RequestInfo{}) require.NoError(t, err) require.NotZero(t, info.LastBlockHeight) // make sure query is fine - resQuery, err := app.QuerySync(ctx, types.RequestQuery{ + resQuery, err := app.QuerySync(ctx, abci.RequestQuery{ Path: "/store", Data: []byte(key), }) @@ -353,7 +353,7 @@ func testClient(t *testing.T, app abcicli.Client, tx []byte, key, value string) require.EqualValues(t, info.LastBlockHeight, resQuery.Height) // make sure proof is fine - resQuery, err = app.QuerySync(ctx, types.RequestQuery{ + resQuery, err = app.QuerySync(ctx, abci.RequestQuery{ Path: "/store", Data: []byte(key), Prove: true, diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0fcfcadf7..cc8cbc207 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -10,9 +10,9 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" - "github.com/tendermint/tendermint/abci/types" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" pc "github.com/tendermint/tendermint/proto/tendermint/crypto" ) @@ -22,13 +22,13 @@ const ( //----------------------------------------- -var _ types.Application = (*PersistentKVStoreApplication)(nil) +var _ abci.Application = (*PersistentKVStoreApplication)(nil) type PersistentKVStoreApplication 
struct { app *Application // validator set - ValUpdates []types.ValidatorUpdate + ValUpdates []abci.ValidatorUpdate valAddrToPubKeyMap map[string]pc.PublicKey @@ -59,7 +59,7 @@ func (app *PersistentKVStoreApplication) SetLogger(l log.Logger) { app.logger = l } -func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.ResponseInfo { +func (app *PersistentKVStoreApplication) Info(req abci.RequestInfo) abci.ResponseInfo { res := app.app.Info(req) res.LastBlockHeight = app.app.state.Height res.LastBlockAppHash = app.app.state.AppHash @@ -67,7 +67,7 @@ func (app *PersistentKVStoreApplication) Info(req types.RequestInfo) types.Respo } // tx is either "val:pubkey!power" or "key=value" or just arbitrary bytes -func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) DeliverTx(req abci.RequestDeliverTx) abci.ResponseDeliverTx { // if it starts with "val:", update the validator set // format is "val:pubkey!power" if isValidatorTx(req.Tx) { @@ -80,18 +80,18 @@ func (app *PersistentKVStoreApplication) DeliverTx(req types.RequestDeliverTx) t return app.app.DeliverTx(req) } -func (app *PersistentKVStoreApplication) CheckTx(req types.RequestCheckTx) types.ResponseCheckTx { +func (app *PersistentKVStoreApplication) CheckTx(req abci.RequestCheckTx) abci.ResponseCheckTx { return app.app.CheckTx(req) } // Commit will panic if InitChain was not called -func (app *PersistentKVStoreApplication) Commit() types.ResponseCommit { +func (app *PersistentKVStoreApplication) Commit() abci.ResponseCommit { return app.app.Commit() } -// When path=/val and data={validator address}, returns the validator update (types.ValidatorUpdate) varint encoded. +// When path=/val and data={validator address}, returns the validator update (abci.ValidatorUpdate) varint encoded. // For any other path, returns an associated value or nil if missing. -func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { +func (app *PersistentKVStoreApplication) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { switch reqQuery.Path { case "/val": key := []byte("val:" + string(reqQuery.Data)) @@ -109,27 +109,27 @@ func (app *PersistentKVStoreApplication) Query(reqQuery types.RequestQuery) (res } // Save the validators in the merkle tree -func (app *PersistentKVStoreApplication) InitChain(req types.RequestInitChain) types.ResponseInitChain { +func (app *PersistentKVStoreApplication) InitChain(req abci.RequestInitChain) abci.ResponseInitChain { for _, v := range req.Validators { r := app.updateValidator(v) if r.IsErr() { app.logger.Error("Error updating validators", "r", r) } } - return types.ResponseInitChain{} + return abci.ResponseInitChain{} } // Track the block hash and header information -func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) types.ResponseBeginBlock { +func (app *PersistentKVStoreApplication) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { // reset valset changes - app.ValUpdates = make([]types.ValidatorUpdate, 0) + app.ValUpdates = make([]abci.ValidatorUpdate, 0) // Punish validators who committed equivocation. 
for _, ev := range req.ByzantineValidators { - if ev.Type == types.EvidenceType_DUPLICATE_VOTE { + if ev.Type == abci.EvidenceType_DUPLICATE_VOTE { addr := string(ev.Validator.Address) if pubKey, ok := app.valAddrToPubKeyMap[addr]; ok { - app.updateValidator(types.ValidatorUpdate{ + app.updateValidator(abci.ValidatorUpdate{ PubKey: pubKey, Power: ev.Validator.Power - 1, }) @@ -142,46 +142,46 @@ func (app *PersistentKVStoreApplication) BeginBlock(req types.RequestBeginBlock) } } - return types.ResponseBeginBlock{} + return abci.ResponseBeginBlock{} } // Update the validator set -func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock { - return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} +func (app *PersistentKVStoreApplication) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { + return abci.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} } func (app *PersistentKVStoreApplication) ListSnapshots( - req types.RequestListSnapshots) types.ResponseListSnapshots { - return types.ResponseListSnapshots{} + req abci.RequestListSnapshots) abci.ResponseListSnapshots { + return abci.ResponseListSnapshots{} } func (app *PersistentKVStoreApplication) LoadSnapshotChunk( - req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { - return types.ResponseLoadSnapshotChunk{} + req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk { + return abci.ResponseLoadSnapshotChunk{} } func (app *PersistentKVStoreApplication) OfferSnapshot( - req types.RequestOfferSnapshot) types.ResponseOfferSnapshot { - return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_ABORT} + req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot { + return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_ABORT} } func (app *PersistentKVStoreApplication) ApplySnapshotChunk( - req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { - return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_ABORT} + req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk { + return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_ABORT} } //--------------------------------------------- // update validators -func (app *PersistentKVStoreApplication) Validators() (validators []types.ValidatorUpdate) { +func (app *PersistentKVStoreApplication) Validators() (validators []abci.ValidatorUpdate) { itr, err := app.app.state.db.Iterator(nil, nil) if err != nil { panic(err) } for ; itr.Valid(); itr.Next() { if isValidatorTx(itr.Key()) { - validator := new(types.ValidatorUpdate) - err := types.ReadMessage(bytes.NewBuffer(itr.Value()), validator) + validator := new(abci.ValidatorUpdate) + err := abci.ReadMessage(bytes.NewBuffer(itr.Value()), validator) if err != nil { panic(err) } @@ -209,13 +209,13 @@ func isValidatorTx(tx []byte) bool { // format is "val:pubkey!power" // pubkey is a base64-encoded 32-byte ed25519 key -func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) abci.ResponseDeliverTx { tx = tx[len(ValidatorSetChangePrefix):] // get the pubkey and power pubKeyAndPower := strings.Split(string(tx), "!") if len(pubKeyAndPower) != 2 { - return types.ResponseDeliverTx{ + return abci.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Expected 'pubkey!power'. 
Got %v", pubKeyAndPower)} } @@ -224,7 +224,7 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // decode the pubkey pubkey, err := base64.StdEncoding.DecodeString(pubkeyS) if err != nil { - return types.ResponseDeliverTx{ + return abci.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Pubkey (%s) is invalid base64", pubkeyS)} } @@ -232,17 +232,17 @@ func (app *PersistentKVStoreApplication) execValidatorTx(tx []byte) types.Respon // decode the power power, err := strconv.ParseInt(powerS, 10, 64) if err != nil { - return types.ResponseDeliverTx{ + return abci.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Power (%s) is not an int", powerS)} } // update - return app.updateValidator(types.UpdateValidator(pubkey, power, "")) + return app.updateValidator(abci.UpdateValidator(pubkey, power, "")) } // add, update, or remove a validator -func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate) types.ResponseDeliverTx { +func (app *PersistentKVStoreApplication) updateValidator(v abci.ValidatorUpdate) abci.ResponseDeliverTx { pubkey, err := cryptoenc.PubKeyFromProto(v.PubKey) if err != nil { panic(fmt.Errorf("can't decode public key: %w", err)) @@ -257,7 +257,7 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate } if !hasKey { pubStr := base64.StdEncoding.EncodeToString(pubkey.Bytes()) - return types.ResponseDeliverTx{ + return abci.ResponseDeliverTx{ Code: code.CodeTypeUnauthorized, Log: fmt.Sprintf("Cannot remove non-existent validator %s", pubStr)} } @@ -268,8 +268,8 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate } else { // add or update validator value := bytes.NewBuffer(make([]byte, 0)) - if err := types.WriteMessage(&v, value); err != nil { - return types.ResponseDeliverTx{ + if err := abci.WriteMessage(&v, value); err != nil { + return abci.ResponseDeliverTx{ Code: code.CodeTypeEncodingError, Log: fmt.Sprintf("Error encoding validator: %v", err)} } @@ -282,5 +282,5 @@ func (app *PersistentKVStoreApplication) updateValidator(v types.ValidatorUpdate // we only update the changes array if we successfully updated the tree app.ValUpdates = append(app.ValUpdates, v) - return types.ResponseDeliverTx{Code: code.CodeTypeOK} + return abci.ResponseDeliverTx{Code: code.CodeTypeOK} } diff --git a/abci/server/grpc_server.go b/abci/server/grpc_server.go index 503f0b64f..30bfc53c2 100644 --- a/abci/server/grpc_server.go +++ b/abci/server/grpc_server.go @@ -5,9 +5,9 @@ import ( "google.golang.org/grpc" - "github.com/tendermint/tendermint/abci/types" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) type GRPCServer struct { @@ -18,11 +18,11 @@ type GRPCServer struct { listener net.Listener server *grpc.Server - app types.ABCIApplicationServer + app abci.ABCIApplicationServer } // NewGRPCServer returns a new gRPC ABCI server -func NewGRPCServer(protoAddr string, app types.ABCIApplicationServer) service.Service { +func NewGRPCServer(protoAddr string, app abci.ABCIApplicationServer) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &GRPCServer{ proto: proto, @@ -44,7 +44,7 @@ func (s *GRPCServer) OnStart() error { s.listener = ln s.server = grpc.NewServer() - types.RegisterABCIApplicationServer(s.server, s.app) + abci.RegisterABCIApplicationServer(s.server, s.app) s.Logger.Info("Listening", "proto", s.proto, "addr", s.addr) 
go func() { diff --git a/abci/server/server.go b/abci/server/server.go index 6dd13ad02..c1f51c81f 100644 --- a/abci/server/server.go +++ b/abci/server/server.go @@ -11,18 +11,18 @@ package server import ( "fmt" - "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) -func NewServer(protoAddr, transport string, app types.Application) (service.Service, error) { +func NewServer(protoAddr, transport string, app abci.Application) (service.Service, error) { var s service.Service var err error switch transport { case "socket": s = NewSocketServer(protoAddr, app) case "grpc": - s = NewGRPCServer(protoAddr, types.NewGRPCApplication(app)) + s = NewGRPCServer(protoAddr, abci.NewGRPCApplication(app)) default: err = fmt.Errorf("unknown server type %s", transport) } diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index 543b444b1..f704ffad2 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -8,11 +8,11 @@ import ( "os" "runtime" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" tmlog "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) // var maxNumberConnections = 2 @@ -30,10 +30,10 @@ type SocketServer struct { nextConnID int appMtx tmsync.Mutex - app types.Application + app abci.Application } -func NewSocketServer(protoAddr string, app types.Application) service.Service { +func NewSocketServer(protoAddr string, app abci.Application) service.Service { proto, addr := tmnet.ProtocolAndAddress(protoAddr) s := &SocketServer{ proto: proto, @@ -120,8 +120,8 @@ func (s *SocketServer) acceptConnectionsRoutine() { connID := s.addConn(conn) - closeConn := make(chan error, 2) // Push to signal connection closed - responses := make(chan *types.Response, 1000) // A channel to buffer responses + closeConn := make(chan error, 2) // Push to signal connection closed + responses := make(chan *abci.Response, 1000) // A channel to buffer responses // Read requests from conn and deal with them go s.handleRequests(closeConn, conn, responses) @@ -152,7 +152,7 @@ func (s *SocketServer) waitForClose(closeConn chan error, connID int) { } // Read requests from conn and deal with them -func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *types.Response) { +func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, responses chan<- *abci.Response) { var count int var bufReader = bufio.NewReader(conn) @@ -174,8 +174,8 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp for { - var req = &types.Request{} - err := types.ReadMessage(bufReader, req) + var req = &abci.Request{} + err := abci.ReadMessage(bufReader, req) if err != nil { if err == io.EOF { closeConn <- err @@ -191,65 +191,65 @@ func (s *SocketServer) handleRequests(closeConn chan error, conn io.Reader, resp } } -func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types.Response) { +func (s *SocketServer) handleRequest(req *abci.Request, responses chan<- *abci.Response) { switch r := req.Value.(type) { - case *types.Request_Echo: - responses <- types.ToResponseEcho(r.Echo.Message) - case *types.Request_Flush: - responses <- types.ToResponseFlush() - case *types.Request_Info: + case *abci.Request_Echo: + responses <- 
abci.ToResponseEcho(r.Echo.Message) + case *abci.Request_Flush: + responses <- abci.ToResponseFlush() + case *abci.Request_Info: res := s.app.Info(*r.Info) - responses <- types.ToResponseInfo(res) - case *types.Request_DeliverTx: + responses <- abci.ToResponseInfo(res) + case *abci.Request_DeliverTx: res := s.app.DeliverTx(*r.DeliverTx) - responses <- types.ToResponseDeliverTx(res) - case *types.Request_CheckTx: + responses <- abci.ToResponseDeliverTx(res) + case *abci.Request_CheckTx: res := s.app.CheckTx(*r.CheckTx) - responses <- types.ToResponseCheckTx(res) - case *types.Request_Commit: + responses <- abci.ToResponseCheckTx(res) + case *abci.Request_Commit: res := s.app.Commit() - responses <- types.ToResponseCommit(res) - case *types.Request_Query: + responses <- abci.ToResponseCommit(res) + case *abci.Request_Query: res := s.app.Query(*r.Query) - responses <- types.ToResponseQuery(res) - case *types.Request_InitChain: + responses <- abci.ToResponseQuery(res) + case *abci.Request_InitChain: res := s.app.InitChain(*r.InitChain) - responses <- types.ToResponseInitChain(res) - case *types.Request_BeginBlock: + responses <- abci.ToResponseInitChain(res) + case *abci.Request_BeginBlock: res := s.app.BeginBlock(*r.BeginBlock) - responses <- types.ToResponseBeginBlock(res) - case *types.Request_EndBlock: + responses <- abci.ToResponseBeginBlock(res) + case *abci.Request_EndBlock: res := s.app.EndBlock(*r.EndBlock) - responses <- types.ToResponseEndBlock(res) - case *types.Request_ListSnapshots: + responses <- abci.ToResponseEndBlock(res) + case *abci.Request_ListSnapshots: res := s.app.ListSnapshots(*r.ListSnapshots) - responses <- types.ToResponseListSnapshots(res) - case *types.Request_OfferSnapshot: + responses <- abci.ToResponseListSnapshots(res) + case *abci.Request_OfferSnapshot: res := s.app.OfferSnapshot(*r.OfferSnapshot) - responses <- types.ToResponseOfferSnapshot(res) - case *types.Request_LoadSnapshotChunk: + responses <- abci.ToResponseOfferSnapshot(res) + case *abci.Request_LoadSnapshotChunk: res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) - responses <- types.ToResponseLoadSnapshotChunk(res) - case *types.Request_ApplySnapshotChunk: + responses <- abci.ToResponseLoadSnapshotChunk(res) + case *abci.Request_ApplySnapshotChunk: res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk) - responses <- types.ToResponseApplySnapshotChunk(res) + responses <- abci.ToResponseApplySnapshotChunk(res) default: - responses <- types.ToResponseException("Unknown request") + responses <- abci.ToResponseException("Unknown request") } } // Pull responses from 'responses' and write them to conn. 
-func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *types.Response) { +func (s *SocketServer) handleResponses(closeConn chan error, conn io.Writer, responses <-chan *abci.Response) { var count int var bufWriter = bufio.NewWriter(conn) for { var res = <-responses - err := types.WriteMessage(res, bufWriter) + err := abci.WriteMessage(res, bufWriter) if err != nil { closeConn <- fmt.Errorf("error writing message: %w", err) return } - if _, ok := res.Value.(*types.Response_Flush); ok { + if _, ok := res.Value.(*abci.Response_Flush); ok { err = bufWriter.Flush() if err != nil { closeConn <- fmt.Errorf("error flushing write buffer: %w", err) diff --git a/abci/tests/benchmarks/parallel/parallel.go b/abci/tests/benchmarks/parallel/parallel.go index fe213313d..03309032c 100644 --- a/abci/tests/benchmarks/parallel/parallel.go +++ b/abci/tests/benchmarks/parallel/parallel.go @@ -5,8 +5,8 @@ import ( "fmt" "log" - "github.com/tendermint/tendermint/abci/types" tmnet "github.com/tendermint/tendermint/libs/net" + "github.com/tendermint/tendermint/pkg/abci" ) func main() { @@ -20,8 +20,8 @@ func main() { go func() { counter := 0 for { - var res = &types.Response{} - err := types.ReadMessage(conn, res) + var res = &abci.Response{} + err := abci.ReadMessage(conn, res) if err != nil { log.Fatal(err.Error()) } @@ -36,9 +36,9 @@ func main() { counter := 0 for i := 0; ; i++ { var bufWriter = bufio.NewWriter(conn) - var req = types.ToRequestEcho("foobar") + var req = abci.ToRequestEcho("foobar") - err := types.WriteMessage(req, bufWriter) + err := abci.WriteMessage(req, bufWriter) if err != nil { log.Fatal(err.Error()) } diff --git a/abci/tests/benchmarks/simple/simple.go b/abci/tests/benchmarks/simple/simple.go index b18eaa580..b239c2816 100644 --- a/abci/tests/benchmarks/simple/simple.go +++ b/abci/tests/benchmarks/simple/simple.go @@ -7,8 +7,8 @@ import ( "log" "reflect" - "github.com/tendermint/tendermint/abci/types" tmnet "github.com/tendermint/tendermint/libs/net" + "github.com/tendermint/tendermint/pkg/abci" ) func main() { @@ -21,7 +21,7 @@ func main() { // Make a bunch of requests counter := 0 for i := 0; ; i++ { - req := types.ToRequestEcho("foobar") + req := abci.ToRequestEcho("foobar") _, err := makeRequest(conn, req) if err != nil { log.Fatal(err.Error()) @@ -33,15 +33,15 @@ func main() { } } -func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error) { +func makeRequest(conn io.ReadWriter, req *abci.Request) (*abci.Response, error) { var bufWriter = bufio.NewWriter(conn) // Write desired request - err := types.WriteMessage(req, bufWriter) + err := abci.WriteMessage(req, bufWriter) if err != nil { return nil, err } - err = types.WriteMessage(types.ToRequestFlush(), bufWriter) + err = abci.WriteMessage(abci.ToRequestFlush(), bufWriter) if err != nil { return nil, err } @@ -51,17 +51,17 @@ func makeRequest(conn io.ReadWriter, req *types.Request) (*types.Response, error } // Read desired response - var res = &types.Response{} - err = types.ReadMessage(conn, res) + var res = &abci.Response{} + err = abci.ReadMessage(conn, res) if err != nil { return nil, err } - var resFlush = &types.Response{} - err = types.ReadMessage(conn, resFlush) + var resFlush = &abci.Response{} + err = abci.ReadMessage(conn, resFlush) if err != nil { return nil, err } - if _, ok := resFlush.Value.(*types.Response_Flush); !ok { + if _, ok := resFlush.Value.(*abci.Response_Flush); !ok { return nil, fmt.Errorf("expected flush response but got something else: %v", 
reflect.TypeOf(resFlush)) } diff --git a/abci/tests/server/client.go b/abci/tests/server/client.go index 10d4a3e58..13f56e4c2 100644 --- a/abci/tests/server/client.go +++ b/abci/tests/server/client.go @@ -8,22 +8,22 @@ import ( mrand "math/rand" abcicli "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" ) var ctx = context.Background() func InitChain(client abcicli.Client) error { total := 10 - vals := make([]types.ValidatorUpdate, total) + vals := make([]abci.ValidatorUpdate, total) for i := 0; i < total; i++ { pubkey := tmrand.Bytes(33) // nolint:gosec // G404: Use of weak random number generator power := mrand.Int() - vals[i] = types.UpdateValidator(pubkey, int64(power), "") + vals[i] = abci.UpdateValidator(pubkey, int64(power), "") } - _, err := client.InitChainSync(ctx, types.RequestInitChain{ + _, err := client.InitChainSync(ctx, abci.RequestInitChain{ Validators: vals, }) if err != nil { @@ -52,7 +52,7 @@ func Commit(client abcicli.Client, hashExp []byte) error { } func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.DeliverTxSync(ctx, types.RequestDeliverTx{Tx: txBytes}) + res, _ := client.DeliverTxSync(ctx, abci.RequestDeliverTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: DeliverTx") @@ -71,7 +71,7 @@ func DeliverTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp [] } func CheckTx(client abcicli.Client, txBytes []byte, codeExp uint32, dataExp []byte) error { - res, _ := client.CheckTxSync(ctx, types.RequestCheckTx{Tx: txBytes}) + res, _ := client.CheckTxSync(ctx, abci.RequestCheckTx{Tx: txBytes}) code, data, log := res.Code, res.Data, res.Log if code != codeExp { fmt.Println("Failed test: CheckTx") diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go index f796f4b7f..229e83def 100644 --- a/cmd/tendermint/commands/gen_node_key.go +++ b/cmd/tendermint/commands/gen_node_key.go @@ -6,7 +6,7 @@ import ( "github.com/spf13/cobra" tmjson "github.com/tendermint/tendermint/libs/json" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // GenNodeKeyCmd allows the generation of a node key. It prints JSON-encoded @@ -20,7 +20,7 @@ var GenNodeKeyCmd = &cobra.Command{ } func genNodeKey(cmd *cobra.Command, args []string) error { - nodeKey := types.GenNodeKey() + nodeKey := p2p.GenNodeKey() bz, err := tmjson.Marshal(nodeKey) if err != nil { diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go index 09f84b09e..2027ed614 100644 --- a/cmd/tendermint/commands/gen_validator.go +++ b/cmd/tendermint/commands/gen_validator.go @@ -6,8 +6,8 @@ import ( "github.com/spf13/cobra" tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/pkg/consensus" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" ) // GenValidatorCmd allows the generation of a keypair for a @@ -21,7 +21,7 @@ var GenValidatorCmd = &cobra.Command{ } func init() { - GenValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + GenValidatorCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. 
Options: ed25519, secp256k1") } diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index bc94f763b..e37e5bd6d 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -11,8 +11,9 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" ) // InitFilesCmd initializes a fresh Tendermint Core instance. @@ -30,7 +31,7 @@ var ( ) func init() { - InitFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + InitFilesCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. Options: ed25519, secp256k1") } @@ -75,7 +76,7 @@ func initFilesWithConfig(config *cfg.Config) error { if tmos.FileExists(nodeKeyFile) { logger.Info("Found node key", "path", nodeKeyFile) } else { - if _, err := types.LoadOrGenNodeKey(nodeKeyFile); err != nil { + if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil { return err } logger.Info("Generated node key", "path", nodeKeyFile) @@ -87,14 +88,14 @@ func initFilesWithConfig(config *cfg.Config) error { logger.Info("Found genesis file", "path", genFile) } else { - genDoc := types.GenesisDoc{ + genDoc := consensus.GenesisDoc{ ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), GenesisTime: tmtime.Now(), - ConsensusParams: types.DefaultConsensusParams(), + ConsensusParams: consensus.DefaultConsensusParams(), } if keyType == "secp256k1" { - genDoc.ConsensusParams.Validator = types.ValidatorParams{ - PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + genDoc.ConsensusParams.Validator = consensus.ValidatorParams{ + PubKeyTypes: []string{consensus.ABCIPubKeyTypeSecp256k1}, } } @@ -107,7 +108,7 @@ func initFilesWithConfig(config *cfg.Config) error { if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } - genDoc.Validators = []types.GenesisValidator{{ + genDoc.Validators = []consensus.GenesisValidator{{ Address: pubKey.Address(), PubKey: pubKey, Power: 10, diff --git a/cmd/tendermint/commands/reindex_event.go b/cmd/tendermint/commands/reindex_event.go index ddc585c1f..a268108d3 100644 --- a/cmd/tendermint/commands/reindex_event.go +++ b/cmd/tendermint/commands/reindex_event.go @@ -8,16 +8,16 @@ import ( "github.com/spf13/cobra" tmdb "github.com/tendermint/tm-db" - abcitypes "github.com/tendermint/tendermint/abci/types" tmcfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/progressbar" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/state/indexer/sink/kv" "github.com/tendermint/tendermint/state/indexer/sink/psql" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) const ( @@ -170,7 +170,7 @@ func eventReIndex(cmd *cobra.Command, es []indexer.EventSink, bs state.BlockStor return fmt.Errorf("not able to load ABCI Response at height %d from the statestore", i) } - e := types.EventDataNewBlockHeader{ + e := events.EventDataNewBlockHeader{ Header: b.Header, NumTxs: int64(len(b.Txs)), ResultBeginBlock: *r.BeginBlock, @@ -182,7 +182,7 @@ func eventReIndex(cmd 
*cobra.Command, es []indexer.EventSink, bs state.BlockStor batch = indexer.NewBatch(e.NumTxs) for i, tx := range b.Data.Txs { - tr := abcitypes.TxResult{ + tr := abci.TxResult{ Height: b.Height, Index: uint32(i), Tx: tx, diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go index 046780ef1..4445c5693 100644 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ b/cmd/tendermint/commands/reset_priv_validator.go @@ -7,8 +7,8 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/pkg/consensus" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" ) // ResetAllCmd removes the database of this Tendermint core @@ -25,7 +25,7 @@ var keepAddrBook bool func init() { ResetAllCmd.Flags().BoolVar(&keepAddrBook, "keep-addr-book", false, "keep the address book intact") - ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + ResetPrivValidatorCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. Options: ed25519, secp256k1") } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index a7307b38f..f78fa293a 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -15,8 +15,8 @@ import ( "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" ) var ( @@ -74,7 +74,7 @@ func init() { "P2P Port") TestnetFilesCmd.Flags().BoolVar(&randomMonikers, "random-monikers", false, "randomize the moniker for each generated node") - TestnetFilesCmd.Flags().StringVar(&keyType, "key", types.ABCIPubKeyTypeEd25519, + TestnetFilesCmd.Flags().StringVar(&keyType, "key", consensus.ABCIPubKeyTypeEd25519, "Key type to generate privval file with. 
Options: ed25519, secp256k1") } @@ -121,7 +121,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { } } - genVals := make([]types.GenesisValidator, nValidators) + genVals := make([]consensus.GenesisValidator, nValidators) for i := 0; i < nValidators; i++ { nodeDirName := fmt.Sprintf("%s%d", nodeDirPrefix, i) @@ -157,7 +157,7 @@ func testnetFiles(cmd *cobra.Command, args []string) error { if err != nil { return fmt.Errorf("can't get pubkey: %w", err) } - genVals[i] = types.GenesisValidator{ + genVals[i] = consensus.GenesisValidator{ Address: pubKey.Address(), PubKey: pubKey, Power: 1, @@ -187,16 +187,16 @@ func testnetFiles(cmd *cobra.Command, args []string) error { } // Generate genesis doc from generated validators - genDoc := &types.GenesisDoc{ + genDoc := &consensus.GenesisDoc{ ChainID: "chain-" + tmrand.Str(6), GenesisTime: tmtime.Now(), InitialHeight: initialHeight, Validators: genVals, - ConsensusParams: types.DefaultConsensusParams(), + ConsensusParams: consensus.DefaultConsensusParams(), } if keyType == "secp256k1" { - genDoc.ConsensusParams.Validator = types.ValidatorParams{ - PubKeyTypes: []string{types.ABCIPubKeyTypeSecp256k1}, + genDoc.ConsensusParams.Validator = consensus.ValidatorParams{ + PubKeyTypes: []string{consensus.ABCIPubKeyTypeSecp256k1}, } } diff --git a/config/config.go b/config/config.go index 7d19616aa..6350449d7 100644 --- a/config/config.go +++ b/config/config.go @@ -13,7 +13,7 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) const ( @@ -288,23 +288,23 @@ func (cfg BaseConfig) NodeKeyFile() string { } // LoadNodeKey loads NodeKey located in filePath. -func (cfg BaseConfig) LoadNodeKeyID() (types.NodeID, error) { +func (cfg BaseConfig) LoadNodeKeyID() (p2p.NodeID, error) { jsonBytes, err := ioutil.ReadFile(cfg.NodeKeyFile()) if err != nil { return "", err } - nodeKey := types.NodeKey{} + nodeKey := p2p.NodeKey{} err = tmjson.Unmarshal(jsonBytes, &nodeKey) if err != nil { return "", err } - nodeKey.ID = types.NodeIDFromPubKey(nodeKey.PubKey()) + nodeKey.ID = p2p.NodeIDFromPubKey(nodeKey.PubKey()) return nodeKey.ID, nil } // LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. If // the file does not exist, it generates and saves a new NodeKey. 
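The init.go, testnet.go and config.go hunks above move the genesis and node-key helpers from the old types package to pkg/consensus and pkg/p2p without changing their shape. A minimal sketch of the new call sites, assuming only the identifiers visible in those hunks; the function name exampleGenesis and the decision to leave Validators empty are illustrative, not part of this patch:

import (
	"fmt"

	tmrand "github.com/tendermint/tendermint/libs/rand"
	tmtime "github.com/tendermint/tendermint/libs/time"
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/p2p"
)

// exampleGenesis builds a throwaway genesis doc the same way
// initFilesWithConfig and testnetFiles do after this patch.
func exampleGenesis() consensus.GenesisDoc {
	genDoc := consensus.GenesisDoc{
		ChainID:         fmt.Sprintf("test-chain-%v", tmrand.Str(6)),
		GenesisTime:     tmtime.Now(),
		ConsensusParams: consensus.DefaultConsensusParams(),
	}

	// Same secp256k1 override that the --key flag handling performs above.
	genDoc.ConsensusParams.Validator = consensus.ValidatorParams{
		PubKeyTypes: []string{consensus.ABCIPubKeyTypeSecp256k1},
	}

	// Node identity helpers now come from pkg/p2p instead of types.
	nodeKey := p2p.GenNodeKey()
	_ = p2p.NodeIDFromPubKey(nodeKey.PubKey())

	return genDoc
}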
-func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) { +func (cfg BaseConfig) LoadOrGenNodeKeyID() (p2p.NodeID, error) { if tmos.FileExists(cfg.NodeKeyFile()) { nodeKey, err := cfg.LoadNodeKeyID() if err != nil { @@ -313,7 +313,7 @@ func (cfg BaseConfig) LoadOrGenNodeKeyID() (types.NodeID, error) { return nodeKey, nil } - nodeKey := types.GenNodeKey() + nodeKey := p2p.GenNodeKey() if err := nodeKey.SaveAs(cfg.NodeKeyFile()); err != nil { return "", err diff --git a/docs/tutorials/go-built-in.md b/docs/tutorials/go-built-in.md index 81325706b..d7be5cfe1 100644 --- a/docs/tutorials/go-built-in.md +++ b/docs/tutorials/go-built-in.md @@ -87,7 +87,7 @@ Create a file called `app.go` with the following content: package main import ( - abcitypes "github.com/tendermint/tendermint/abci/types" + abcitypes "github.com/tendermint/tendermint/pkg/abci" ) type KVStoreApplication struct {} @@ -346,7 +346,7 @@ import ( "github.com/dgraph-io/badger" "github.com/spf13/viper" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" cfg "github.com/tendermint/tendermint/config" tmflags "github.com/tendermint/tendermint/libs/cli/flags" "github.com/tendermint/tendermint/libs/log" diff --git a/docs/tutorials/go.md b/docs/tutorials/go.md index 9461f2cea..6c2896149 100644 --- a/docs/tutorials/go.md +++ b/docs/tutorials/go.md @@ -90,7 +90,7 @@ Create a file called `app.go` with the following content: package main import ( - abcitypes "github.com/tendermint/tendermint/abci/types" + abcitypes "github.com/tendermint/tendermint/pkg/abci" ) type KVStoreApplication struct {} diff --git a/internal/blocksync/msgs.go b/internal/blocksync/msgs.go index caad44b7b..fd74c6f3a 100644 --- a/internal/blocksync/msgs.go +++ b/internal/blocksync/msgs.go @@ -1,12 +1,12 @@ package blocksync import ( + "github.com/tendermint/tendermint/pkg/metadata" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" ) const ( - MaxMsgSize = types.MaxBlockSizeBytes + + MaxMsgSize = metadata.MaxBlockSizeBytes + bcproto.BlockResponseMessagePrefixSize + bcproto.BlockResponseMessageFieldKeySize ) diff --git a/internal/blocksync/v0/pool.go b/internal/blocksync/v0/pool.go index b3704f333..14285747f 100644 --- a/internal/blocksync/v0/pool.go +++ b/internal/blocksync/v0/pool.go @@ -11,7 +11,8 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/p2p" ) /* @@ -62,7 +63,7 @@ var peerTimeout = 15 * time.Second // not const so we can override with tests // PeerID responsible for delivering the block. type BlockRequest struct { Height int64 - PeerID types.NodeID + PeerID p2p.NodeID } // BlockPool keeps track of the block sync peers, block requests and block responses. @@ -75,7 +76,7 @@ type BlockPool struct { requesters map[int64]*bpRequester height int64 // the lowest key in requesters. // peers - peers map[types.NodeID]*bpPeer + peers map[p2p.NodeID]*bpPeer maxPeerHeight int64 // the biggest reported height // atomic @@ -93,7 +94,7 @@ type BlockPool struct { // requests and errors will be sent to requestsCh and errorsCh accordingly. 
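For application authors, the two tutorial hunks above are the whole visible surface of this rename: only the import path changes, while the abcitypes alias and its identifiers stay the same. A hedged sketch of a tutorial-style method after the move, assuming the request and response type names carry over to pkg/abci unchanged (RequestCheckTx, ResponseCheckTx, and the Code field used in abci/tests/server/client.go); KVStoreApplication is the tutorial's own placeholder type:

import abcitypes "github.com/tendermint/tendermint/pkg/abci"

type KVStoreApplication struct{}

// CheckTx is unchanged apart from where abcitypes now resolves to.
func (KVStoreApplication) CheckTx(req abcitypes.RequestCheckTx) abcitypes.ResponseCheckTx {
	return abcitypes.ResponseCheckTx{Code: 0}
}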
func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { bp := &BlockPool{ - peers: make(map[types.NodeID]*bpPeer), + peers: make(map[p2p.NodeID]*bpPeer), requesters: make(map[int64]*bpRequester), height: start, @@ -197,7 +198,7 @@ func (pool *BlockPool) IsCaughtUp() bool { // We need to see the second block's Commit to validate the first block. // So we peek two blocks at a time. // The caller will verify the commit. -func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { +func (pool *BlockPool) PeekTwoBlocks() (first *block.Block, second *block.Block) { pool.mtx.RLock() defer pool.mtx.RUnlock() @@ -244,13 +245,13 @@ func (pool *BlockPool) PopRequest() { // RedoRequest invalidates the block at pool.height, // Remove the peer and redo request from others. // Returns the ID of the removed peer. -func (pool *BlockPool) RedoRequest(height int64) types.NodeID { +func (pool *BlockPool) RedoRequest(height int64) p2p.NodeID { pool.mtx.Lock() defer pool.mtx.Unlock() request := pool.requesters[height] peerID := request.getPeerID() - if peerID != types.NodeID("") { + if peerID != p2p.NodeID("") { // RemovePeer will redo all requesters associated with this peer. pool.removePeer(peerID) } @@ -259,7 +260,7 @@ func (pool *BlockPool) RedoRequest(height int64) types.NodeID { // AddBlock validates that the block comes from the peer it was expected from and calls the requester to store it. // TODO: ensure that blocks come in order for each peer. -func (pool *BlockPool) AddBlock(peerID types.NodeID, block *types.Block, blockSize int) { +func (pool *BlockPool) AddBlock(peerID p2p.NodeID, block *block.Block, blockSize int) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -306,7 +307,7 @@ func (pool *BlockPool) LastAdvance() time.Time { } // SetPeerRange sets the peer's alleged blockchain base and height. -func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int64) { +func (pool *BlockPool) SetPeerRange(peerID p2p.NodeID, base int64, height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -327,14 +328,14 @@ func (pool *BlockPool) SetPeerRange(peerID types.NodeID, base int64, height int6 // RemovePeer removes the peer with peerID from the pool. If there's no peer // with peerID, function is a no-op. 
-func (pool *BlockPool) RemovePeer(peerID types.NodeID) { +func (pool *BlockPool) RemovePeer(peerID p2p.NodeID) { pool.mtx.Lock() defer pool.mtx.Unlock() pool.removePeer(peerID) } -func (pool *BlockPool) removePeer(peerID types.NodeID) { +func (pool *BlockPool) removePeer(peerID p2p.NodeID) { for _, requester := range pool.requesters { if requester.getPeerID() == peerID { requester.redo(peerID) @@ -415,14 +416,14 @@ func (pool *BlockPool) requestersLen() int64 { return int64(len(pool.requesters)) } -func (pool *BlockPool) sendRequest(height int64, peerID types.NodeID) { +func (pool *BlockPool) sendRequest(height int64, peerID p2p.NodeID) { if !pool.IsRunning() { return } pool.requestsCh <- BlockRequest{height, peerID} } -func (pool *BlockPool) sendError(err error, peerID types.NodeID) { +func (pool *BlockPool) sendError(err error, peerID p2p.NodeID) { if !pool.IsRunning() { return } @@ -470,7 +471,7 @@ type bpPeer struct { height int64 base int64 pool *BlockPool - id types.NodeID + id p2p.NodeID recvMonitor *flow.Monitor timeout *time.Timer @@ -478,7 +479,7 @@ type bpPeer struct { logger log.Logger } -func newBPPeer(pool *BlockPool, peerID types.NodeID, base int64, height int64) *bpPeer { +func newBPPeer(pool *BlockPool, peerID p2p.NodeID, base int64, height int64) *bpPeer { peer := &bpPeer{ pool: pool, id: peerID, @@ -543,11 +544,11 @@ type bpRequester struct { pool *BlockPool height int64 gotBlockCh chan struct{} - redoCh chan types.NodeID // redo may send multitime, add peerId to identify repeat + redoCh chan p2p.NodeID // redo may send multitime, add peerId to identify repeat mtx tmsync.Mutex - peerID types.NodeID - block *types.Block + peerID p2p.NodeID + block *block.Block } func newBPRequester(pool *BlockPool, height int64) *bpRequester { @@ -555,7 +556,7 @@ func newBPRequester(pool *BlockPool, height int64) *bpRequester { pool: pool, height: height, gotBlockCh: make(chan struct{}, 1), - redoCh: make(chan types.NodeID, 1), + redoCh: make(chan p2p.NodeID, 1), peerID: "", block: nil, @@ -570,7 +571,7 @@ func (bpr *bpRequester) OnStart() error { } // Returns true if the peer matches and block doesn't already exist. -func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { +func (bpr *bpRequester) setBlock(block *block.Block, peerID p2p.NodeID) bool { bpr.mtx.Lock() if bpr.block != nil || bpr.peerID != peerID { bpr.mtx.Unlock() @@ -586,13 +587,13 @@ func (bpr *bpRequester) setBlock(block *types.Block, peerID types.NodeID) bool { return true } -func (bpr *bpRequester) getBlock() *types.Block { +func (bpr *bpRequester) getBlock() *block.Block { bpr.mtx.Lock() defer bpr.mtx.Unlock() return bpr.block } -func (bpr *bpRequester) getPeerID() types.NodeID { +func (bpr *bpRequester) getPeerID() p2p.NodeID { bpr.mtx.Lock() defer bpr.mtx.Unlock() return bpr.peerID @@ -614,7 +615,7 @@ func (bpr *bpRequester) reset() { // Tells bpRequester to pick another peer and try again. // NOTE: Nonblocking, and does nothing if another redo // was already requested. 
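The pool.go changes in this stretch are a one-for-one swap of types.NodeID for p2p.NodeID and types.Block for block.Block; the BlockPool API itself is untouched. A test-style, in-package sketch of the renamed identifiers, restricted to signatures visible above and to the pkg/block, pkg/metadata and pkg/p2p imports that pool_test.go uses; the function name sketchPoolUsage, the channel capacities and the peer-1 ID are illustrative:

func sketchPoolUsage() {
	requestsCh := make(chan BlockRequest, 100)
	errorsCh := make(chan peerError, 100)
	pool := NewBlockPool(1, requestsCh, errorsCh)

	// Peers are identified by p2p.NodeID rather than types.NodeID.
	peerID := p2p.NodeID("peer-1")
	pool.SetPeerRange(peerID, 1, 10)

	// Blocks come from pkg/block; the header type lives in pkg/metadata.
	pool.AddBlock(peerID, &block.Block{Header: metadata.Header{Height: 1}}, 123)
}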
-func (bpr *bpRequester) redo(peerID types.NodeID) { +func (bpr *bpRequester) redo(peerID p2p.NodeID) { select { case bpr.redoCh <- peerID: default: diff --git a/internal/blocksync/v0/pool_test.go b/internal/blocksync/v0/pool_test.go index 67617d2b7..f621d9491 100644 --- a/internal/blocksync/v0/pool_test.go +++ b/internal/blocksync/v0/pool_test.go @@ -11,7 +11,9 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" ) func init() { @@ -19,7 +21,7 @@ func init() { } type testPeer struct { - id types.NodeID + id p2p.NodeID base int64 height int64 inputChan chan inputData // make sure each peer's data is sequential @@ -41,7 +43,7 @@ func (p testPeer) runInputRoutine() { // Request desired, pretend like we got the block immediately. func (p testPeer) simulateInput(input inputData) { - block := &types.Block{Header: types.Header{Height: input.request.Height}} + block := &block.Block{Header: metadata.Header{Height: input.request.Height}} input.pool.AddBlock(input.request.PeerID, block, 123) // TODO: uncommenting this creates a race which is detected by: // https://github.com/golang/go/blob/2bd767b1022dd3254bcec469f0ee164024726486/src/testing/testing.go#L854-L856 @@ -49,7 +51,7 @@ func (p testPeer) simulateInput(input inputData) { // input.t.Logf("Added block from peer %v (height: %v)", input.request.PeerID, input.request.Height) } -type testPeers map[types.NodeID]testPeer +type testPeers map[p2p.NodeID]testPeer func (ps testPeers) start() { for _, v := range ps { @@ -66,7 +68,7 @@ func (ps testPeers) stop() { func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { peers := make(testPeers, numPeers) for i := 0; i < numPeers; i++ { - peerID := types.NodeID(tmrand.Str(12)) + peerID := p2p.NodeID(tmrand.Str(12)) height := minHeight + mrand.Int63n(maxHeight-minHeight) base := minHeight + int64(i) if base > height { @@ -182,7 +184,7 @@ func TestBlockPoolTimeout(t *testing.T) { // Pull from channels counter := 0 - timedOut := map[types.NodeID]struct{}{} + timedOut := map[p2p.NodeID]struct{}{} for { select { case err := <-errorsCh: @@ -203,7 +205,7 @@ func TestBlockPoolTimeout(t *testing.T) { func TestBlockPoolRemovePeer(t *testing.T) { peers := make(testPeers, 10) for i := 0; i < 10; i++ { - peerID := types.NodeID(fmt.Sprintf("%d", i+1)) + peerID := p2p.NodeID(fmt.Sprintf("%d", i+1)) height := int64(i + 1) peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)} } @@ -227,10 +229,10 @@ func TestBlockPoolRemovePeer(t *testing.T) { assert.EqualValues(t, 10, pool.MaxPeerHeight()) // remove not-existing peer - assert.NotPanics(t, func() { pool.RemovePeer(types.NodeID("Superman")) }) + assert.NotPanics(t, func() { pool.RemovePeer(p2p.NodeID("Superman")) }) // remove peer with biggest height - pool.RemovePeer(types.NodeID("10")) + pool.RemovePeer(p2p.NodeID("10")) assert.EqualValues(t, 9, pool.MaxPeerHeight()) // remove all peers diff --git a/internal/blocksync/v0/reactor.go b/internal/blocksync/v0/reactor.go index c43959808..857dfbf4e 100644 --- a/internal/blocksync/v0/reactor.go +++ b/internal/blocksync/v0/reactor.go @@ -12,10 +12,12 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmSync "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/pkg/block" + 
"github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) var ( @@ -67,7 +69,7 @@ type consensusReactor interface { type peerError struct { err error - peerID types.NodeID + peerID p2ptypes.NodeID } func (e peerError) Error() string { @@ -205,7 +207,7 @@ func (r *Reactor) OnStop() { // respondToPeer loads a block and sends it to the requesting peer, if we have it. // Otherwise, we'll respond saying we do not have it. -func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID types.NodeID) { +func (r *Reactor) respondToPeer(msg *bcproto.BlockRequest, peerID p2ptypes.NodeID) { block := r.store.LoadBlock(msg.Height) if block != nil { blockProto, err := block.ToProto() @@ -240,7 +242,7 @@ func (r *Reactor) handleBlockchainMessage(envelope p2p.Envelope) error { r.respondToPeer(msg, envelope.From) case *bcproto.BlockResponse: - block, err := types.BlockFromProto(msg.Block) + block, err := block.BlockFromProto(msg.Block) if err != nil { logger.Error("failed to convert block from proto", "err", err) return err @@ -531,9 +533,9 @@ FOR_LOOP: } var ( - firstParts = first.MakePartSet(types.BlockPartSizeBytes) + firstParts = first.MakePartSet(metadata.BlockPartSizeBytes) firstPartSetHeader = firstParts.Header() - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} + firstID = metadata.BlockID{Hash: first.Hash(), PartSetHeader: firstPartSetHeader} ) // Finally, verify the first block using the second's commit. diff --git a/internal/blocksync/v0/reactor_test.go b/internal/blocksync/v0/reactor_test.go index e038b57af..fd45b978a 100644 --- a/internal/blocksync/v0/reactor_test.go +++ b/internal/blocksync/v0/reactor_test.go @@ -7,7 +7,6 @@ import ( "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cons "github.com/tendermint/tendermint/internal/consensus" "github.com/tendermint/tendermint/internal/mempool/mock" @@ -15,34 +14,37 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" ) type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - nodes []types.NodeID + nodes []p2ptypes.NodeID - reactors map[types.NodeID]*Reactor - app map[types.NodeID]proxy.AppConns + reactors map[p2ptypes.NodeID]*Reactor + app map[p2ptypes.NodeID]proxy.AppConns - blockchainChannels map[types.NodeID]*p2p.Channel - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + blockchainChannels map[p2ptypes.NodeID]*p2p.Channel + peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate + peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates blockSync bool } func setup( t *testing.T, - genDoc 
*types.GenesisDoc, - privVal types.PrivValidator, + genDoc *consensus.GenesisDoc, + privVal consensus.PrivValidator, maxBlockHeights []int64, chBuf uint, ) *reactorTestSuite { @@ -55,12 +57,12 @@ func setup( rts := &reactorTestSuite{ logger: log.TestingLogger().With("module", "blockchain", "testCase", t.Name()), network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - nodes: make([]types.NodeID, 0, numNodes), - reactors: make(map[types.NodeID]*Reactor, numNodes), - app: make(map[types.NodeID]proxy.AppConns, numNodes), - blockchainChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + nodes: make([]p2ptypes.NodeID, 0, numNodes), + reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes), + app: make(map[p2ptypes.NodeID]proxy.AppConns, numNodes), + blockchainChannels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes), + peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes), blockSync: true, } @@ -89,9 +91,9 @@ func setup( } func (rts *reactorTestSuite) addNode(t *testing.T, - nodeID types.NodeID, - genDoc *types.GenesisDoc, - privVal types.PrivValidator, + nodeID p2ptypes.NodeID, + genDoc *consensus.GenesisDoc, + privVal consensus.PrivValidator, maxBlockHeight int64, ) { t.Helper() @@ -119,7 +121,7 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) + lastCommit := metadata.NewCommit(blockHeight-1, 0, metadata.BlockID{}, nil) if blockHeight > 1 { lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) @@ -134,17 +136,17 @@ func (rts *reactorTestSuite) addNode(t *testing.T, ) require.NoError(t, err) - lastCommit = types.NewCommit( + lastCommit = metadata.NewCommit( vote.Height, vote.Round, lastBlockMeta.BlockID, - []types.CommitSig{vote.CommitSig()}, + []metadata.CommitSig{vote.CommitSig()}, ) } thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} + thisParts := thisBlock.MakePartSet(metadata.BlockPartSizeBytes) + blockID := metadata.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} state, err = blockExec.ApplyBlock(state, blockID, thisBlock) require.NoError(t, err) diff --git a/internal/blocksync/v2/internal/behavior/peer_behaviour.go b/internal/blocksync/v2/internal/behavior/peer_behaviour.go index 90948d888..d3f00d97b 100644 --- a/internal/blocksync/v2/internal/behavior/peer_behaviour.go +++ b/internal/blocksync/v2/internal/behavior/peer_behaviour.go @@ -1,12 +1,12 @@ package behavior -import "github.com/tendermint/tendermint/types" +import "github.com/tendermint/tendermint/pkg/p2p" // PeerBehavior is a struct describing a behavior a peer performed. // `peerID` identifies the peer and reason characterizes the specific // behavior performed by the peer. type PeerBehavior struct { - peerID types.NodeID + peerID p2p.NodeID reason interface{} } @@ -15,7 +15,7 @@ type badMessage struct { } // BadMessage returns a badMessage PeerBehavior. 
-func BadMessage(peerID types.NodeID, explanation string) PeerBehavior { +func BadMessage(peerID p2p.NodeID, explanation string) PeerBehavior { return PeerBehavior{peerID: peerID, reason: badMessage{explanation}} } @@ -24,7 +24,7 @@ type messageOutOfOrder struct { } // MessageOutOfOrder returns a messagOutOfOrder PeerBehavior. -func MessageOutOfOrder(peerID types.NodeID, explanation string) PeerBehavior { +func MessageOutOfOrder(peerID p2p.NodeID, explanation string) PeerBehavior { return PeerBehavior{peerID: peerID, reason: messageOutOfOrder{explanation}} } @@ -33,7 +33,7 @@ type consensusVote struct { } // ConsensusVote returns a consensusVote PeerBehavior. -func ConsensusVote(peerID types.NodeID, explanation string) PeerBehavior { +func ConsensusVote(peerID p2p.NodeID, explanation string) PeerBehavior { return PeerBehavior{peerID: peerID, reason: consensusVote{explanation}} } @@ -42,6 +42,6 @@ type blockPart struct { } // BlockPart returns blockPart PeerBehavior. -func BlockPart(peerID types.NodeID, explanation string) PeerBehavior { +func BlockPart(peerID p2p.NodeID, explanation string) PeerBehavior { return PeerBehavior{peerID: peerID, reason: blockPart{explanation}} } diff --git a/internal/blocksync/v2/internal/behavior/reporter.go b/internal/blocksync/v2/internal/behavior/reporter.go index c150a98d5..fad94d5b9 100644 --- a/internal/blocksync/v2/internal/behavior/reporter.go +++ b/internal/blocksync/v2/internal/behavior/reporter.go @@ -5,7 +5,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // Reporter provides an interface for reactors to report the behavior @@ -52,14 +52,14 @@ func (spbr *SwitchReporter) Report(behavior PeerBehavior) error { // behavior in manufactured scenarios. type MockReporter struct { mtx tmsync.RWMutex - pb map[types.NodeID][]PeerBehavior + pb map[p2ptypes.NodeID][]PeerBehavior } // NewMockReporter returns a Reporter which records all reported // behaviors in memory. func NewMockReporter() *MockReporter { return &MockReporter{ - pb: map[types.NodeID][]PeerBehavior{}, + pb: map[p2ptypes.NodeID][]PeerBehavior{}, } } @@ -73,7 +73,7 @@ func (mpbr *MockReporter) Report(behavior PeerBehavior) error { } // GetBehaviors returns all behaviors reported on the peer identified by peerID. -func (mpbr *MockReporter) GetBehaviors(peerID types.NodeID) []PeerBehavior { +func (mpbr *MockReporter) GetBehaviors(peerID p2ptypes.NodeID) []PeerBehavior { mpbr.mtx.RLock() defer mpbr.mtx.RUnlock() if items, ok := mpbr.pb[peerID]; ok { diff --git a/internal/blocksync/v2/internal/behavior/reporter_test.go b/internal/blocksync/v2/internal/behavior/reporter_test.go index 861a63df0..f4bbc15ea 100644 --- a/internal/blocksync/v2/internal/behavior/reporter_test.go +++ b/internal/blocksync/v2/internal/behavior/reporter_test.go @@ -5,13 +5,13 @@ import ( "testing" bh "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // TestMockReporter tests the MockReporter's ability to store reported // peer behavior in memory indexed by the peerID. 
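The behavior package keeps its reporter API; only the NodeID type moves to pkg/p2p. A short sketch of the MockReporter flow that the tests below exercise, limited to calls visible in these hunks; the function name sketchReporter and the literal strings are illustrative:

import (
	"github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior"
	"github.com/tendermint/tendermint/pkg/p2p"
)

func sketchReporter() {
	pr := behavior.NewMockReporter()
	peerID := p2p.NodeID("MockPeer")

	// Report records the behavior in memory, keyed by p2p.NodeID.
	if err := pr.Report(behavior.BadMessage(peerID, "bad message")); err != nil {
		panic(err)
	}

	// GetBehaviors returns every behavior reported against that peer.
	_ = pr.GetBehaviors(peerID)
}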
func TestMockReporter(t *testing.T) { - var peerID types.NodeID = "MockPeer" + var peerID p2p.NodeID = "MockPeer" pr := bh.NewMockReporter() behaviors := pr.GetBehaviors(peerID) @@ -34,7 +34,7 @@ func TestMockReporter(t *testing.T) { } type scriptItem struct { - peerID types.NodeID + peerID p2p.NodeID behavior bh.PeerBehavior } @@ -76,10 +76,10 @@ func equalBehaviors(a []bh.PeerBehavior, b []bh.PeerBehavior) bool { // freequencies that those behaviors occur. func TestEqualPeerBehaviors(t *testing.T) { var ( - peerID types.NodeID = "MockPeer" - consensusVote = bh.ConsensusVote(peerID, "voted") - blockPart = bh.BlockPart(peerID, "blocked") - equals = []struct { + peerID p2p.NodeID = "MockPeer" + consensusVote = bh.ConsensusVote(peerID, "voted") + blockPart = bh.BlockPart(peerID, "blocked") + equals = []struct { left []bh.PeerBehavior right []bh.PeerBehavior }{ @@ -128,7 +128,7 @@ func TestEqualPeerBehaviors(t *testing.T) { func TestMockPeerBehaviorReporterConcurrency(t *testing.T) { var ( behaviorScript = []struct { - peerID types.NodeID + peerID p2p.NodeID behaviors []bh.PeerBehavior }{ {"1", []bh.PeerBehavior{bh.ConsensusVote("1", "")}}, diff --git a/internal/blocksync/v2/io.go b/internal/blocksync/v2/io.go index 743428516..2d62c0c15 100644 --- a/internal/blocksync/v2/io.go +++ b/internal/blocksync/v2/io.go @@ -5,9 +5,9 @@ import ( "github.com/gogo/protobuf/proto" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/pkg/block" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) var ( @@ -16,7 +16,7 @@ var ( type iIO interface { sendBlockRequest(peer p2p.Peer, height int64) error - sendBlockToPeer(block *types.Block, peer p2p.Peer) error + sendBlockToPeer(block *block.Block, peer p2p.Peer) error sendBlockNotFound(height int64, peer p2p.Peer) error sendStatusResponse(base, height int64, peer p2p.Peer) error @@ -90,7 +90,7 @@ func (sio *switchIO) sendStatusResponse(base int64, height int64, peer p2p.Peer) return nil } -func (sio *switchIO) sendBlockToPeer(block *types.Block, peer p2p.Peer) error { +func (sio *switchIO) sendBlockToPeer(block *block.Block, peer p2p.Peer) error { if block == nil { panic("trying to send nil block") } diff --git a/internal/blocksync/v2/processor.go b/internal/blocksync/v2/processor.go index b448e7d8a..21416d036 100644 --- a/internal/blocksync/v2/processor.go +++ b/internal/blocksync/v2/processor.go @@ -3,8 +3,10 @@ package v2 import ( "fmt" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) // Events generated by the processor: @@ -12,8 +14,8 @@ import ( type pcBlockVerificationFailure struct { priorityNormal height int64 - firstPeerID types.NodeID - secondPeerID types.NodeID + firstPeerID p2p.NodeID + secondPeerID p2p.NodeID } func (e pcBlockVerificationFailure) String() string { @@ -25,7 +27,7 @@ func (e pcBlockVerificationFailure) String() string { type pcBlockProcessed struct { priorityNormal height int64 - peerID types.NodeID + peerID p2p.NodeID } func (e pcBlockProcessed) String() string { @@ -44,8 +46,8 @@ func (p pcFinished) Error() string { } type queueItem struct { - block *types.Block - peerID types.NodeID + block *block.Block + peerID p2p.NodeID } type blockQueue map[int64]queueItem @@ -94,7 +96,7 @@ func (state *pcState) synced() bool { 
return len(state.queue) <= 1 } -func (state *pcState) enqueue(peerID types.NodeID, block *types.Block, height int64) { +func (state *pcState) enqueue(peerID p2p.NodeID, block *block.Block, height int64) { if item, ok := state.queue[height]; ok { panic(fmt.Sprintf( "duplicate block %d (%X) enqueued by processor (sent by %v; existing block %X from %v)", @@ -109,7 +111,7 @@ func (state *pcState) height() int64 { } // purgePeer moves all unprocessed blocks from the queue -func (state *pcState) purgePeer(peerID types.NodeID) { +func (state *pcState) purgePeer(peerID p2p.NodeID) { // what if height is less than state.height? for height, item := range state.queue { if item.peerID == peerID { @@ -159,8 +161,8 @@ func (state *pcState) handle(event Event) (Event, error) { var ( first, second = firstItem.block, secondItem.block - firstParts = first.MakePartSet(types.BlockPartSizeBytes) - firstID = types.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} + firstParts = first.MakePartSet(metadata.BlockPartSizeBytes) + firstID = metadata.BlockID{Hash: first.Hash(), PartSetHeader: firstParts.Header()} ) // verify if +second+ last commit "confirms" +first+ block diff --git a/internal/blocksync/v2/processor_context.go b/internal/blocksync/v2/processor_context.go index bc6852565..71ea1480d 100644 --- a/internal/blocksync/v2/processor_context.go +++ b/internal/blocksync/v2/processor_context.go @@ -4,17 +4,18 @@ import ( "fmt" cons "github.com/tendermint/tendermint/internal/consensus" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) type processorContext interface { - applyBlock(blockID types.BlockID, block *types.Block) error - verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error - saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + applyBlock(blockID metadata.BlockID, block *block.Block) error + verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error + saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) tmState() state.State setState(state.State) - recordConsMetrics(block *types.Block) + recordConsMetrics(block *block.Block) } type pContext struct { @@ -33,7 +34,7 @@ func newProcessorContext(st blockStore, ex blockApplier, s state.State, m *cons. 
} } -func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { +func (pc *pContext) applyBlock(blockID metadata.BlockID, block *block.Block) error { newState, err := pc.applier.ApplyBlock(pc.state, blockID, block) pc.state = newState return err @@ -47,15 +48,15 @@ func (pc *pContext) setState(state state.State) { pc.state = state } -func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { +func (pc pContext) verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error { return pc.state.Validators.VerifyCommitLight(chainID, blockID, height, commit) } -func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (pc *pContext) saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { pc.store.SaveBlock(block, blockParts, seenCommit) } -func (pc *pContext) recordConsMetrics(block *types.Block) { +func (pc *pContext) recordConsMetrics(block *block.Block) { pc.metrics.RecordConsMetrics(block) } @@ -76,7 +77,7 @@ func newMockProcessorContext( } } -func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) error { +func (mpc *mockPContext) applyBlock(blockID metadata.BlockID, block *block.Block) error { for _, h := range mpc.applicationBL { if h == block.Height { return fmt.Errorf("generic application error") @@ -86,7 +87,7 @@ func (mpc *mockPContext) applyBlock(blockID types.BlockID, block *types.Block) e return nil } -func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { +func (mpc *mockPContext) verifyCommit(chainID string, blockID metadata.BlockID, height int64, commit *metadata.Commit) error { for _, h := range mpc.verificationBL { if h == height { return fmt.Errorf("generic verification error") @@ -95,7 +96,7 @@ func (mpc *mockPContext) verifyCommit(chainID string, blockID types.BlockID, hei return nil } -func (mpc *mockPContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (mpc *mockPContext) saveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { } @@ -107,6 +108,6 @@ func (mpc *mockPContext) tmState() state.State { return mpc.state } -func (mpc *mockPContext) recordConsMetrics(block *types.Block) { +func (mpc *mockPContext) recordConsMetrics(block *block.Block) { } diff --git a/internal/blocksync/v2/processor_test.go b/internal/blocksync/v2/processor_test.go index f7d51112b..31134fa25 100644 --- a/internal/blocksync/v2/processor_test.go +++ b/internal/blocksync/v2/processor_test.go @@ -5,8 +5,10 @@ import ( "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmState "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) // pcBlock is a test helper structure with simple types. Its purpose is to help with test readability. @@ -26,8 +28,8 @@ type params struct { } // makePcBlock makes an empty block. -func makePcBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} +func makePcBlock(height int64) *block.Block { + return &block.Block{Header: metadata.Header{Height: height}} } // makeState takes test parameters and creates a specific processor state. 
@@ -39,7 +41,7 @@ func makeState(p *params) *pcState { state := newPcState(context) for _, item := range p.items { - state.enqueue(types.NodeID(item.pid), makePcBlock(item.height), item.height) + state.enqueue(p2p.NodeID(item.pid), makePcBlock(item.height), item.height) } state.blocksSynced = p.blocksSynced @@ -47,7 +49,7 @@ func makeState(p *params) *pcState { return state } -func mBlockResponse(peerID types.NodeID, height int64) scBlockReceived { +func mBlockResponse(peerID p2p.NodeID, height int64) scBlockReceived { return scBlockReceived{ peerID: peerID, block: makePcBlock(height), diff --git a/internal/blocksync/v2/reactor.go b/internal/blocksync/v2/reactor.go index caa5d73f0..de8fbb548 100644 --- a/internal/blocksync/v2/reactor.go +++ b/internal/blocksync/v2/reactor.go @@ -14,9 +14,11 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/sync" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) const ( @@ -25,8 +27,8 @@ const ( ) type blockStore interface { - LoadBlock(height int64) *types.Block - SaveBlock(*types.Block, *types.PartSet, *types.Commit) + LoadBlock(height int64) *block.Block + SaveBlock(*block.Block, *metadata.PartSet, *metadata.Commit) Base() int64 Height() int64 } @@ -56,7 +58,7 @@ type BlockchainReactor struct { } type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) + ApplyBlock(state state.State, blockID metadata.BlockID, block *block.Block) (state.State, error) } // XXX: unify naming in this package around tmState @@ -227,9 +229,9 @@ func (e rProcessBlock) String() string { type bcBlockResponse struct { priorityNormal time time.Time - peerID types.NodeID + peerID p2ptypes.NodeID size int64 - block *types.Block + block *block.Block } func (resp bcBlockResponse) String() string { @@ -241,7 +243,7 @@ func (resp bcBlockResponse) String() string { type bcNoBlockResponse struct { priorityNormal time time.Time - peerID types.NodeID + peerID p2ptypes.NodeID height int64 } @@ -254,7 +256,7 @@ func (resp bcNoBlockResponse) String() string { type bcStatusResponse struct { priorityNormal time time.Time - peerID types.NodeID + peerID p2ptypes.NodeID base int64 height int64 } @@ -267,7 +269,7 @@ func (resp bcStatusResponse) String() string { // new peer is connected type bcAddNewPeer struct { priorityNormal - peerID types.NodeID + peerID p2ptypes.NodeID } func (resp bcAddNewPeer) String() string { @@ -277,7 +279,7 @@ func (resp bcAddNewPeer) String() string { // existing peer is removed type bcRemovePeer struct { priorityHigh - peerID types.NodeID + peerID p2ptypes.NodeID reason interface{} } @@ -536,7 +538,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { r.mtx.RUnlock() case *bcproto.Message_BlockResponse: - bi, err := types.BlockFromProto(msg.BlockResponse.Block) + bi, err := block.BlockFromProto(msg.BlockResponse.Block) if err != nil { logger.Error("error transitioning block from protobuf", "err", err) _ = r.reporter.Report(behavior.BadMessage(src.ID(), err.Error())) diff --git a/internal/blocksync/v2/reactor_test.go b/internal/blocksync/v2/reactor_test.go index 4120b3942..a4e63d8f4 100644 --- 
a/internal/blocksync/v2/reactor_test.go +++ b/internal/blocksync/v2/reactor_test.go @@ -13,7 +13,6 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/blocksync/v2/internal/behavior" cons "github.com/tendermint/tendermint/internal/consensus" @@ -23,21 +22,25 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" tmstore "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) type mockPeer struct { service.Service - id types.NodeID + id p2ptypes.NodeID } func (mp mockPeer) FlushStop() {} -func (mp mockPeer) ID() types.NodeID { return mp.id } +func (mp mockPeer) ID() p2ptypes.NodeID { return mp.id } func (mp mockPeer) RemoteIP() net.IP { return net.IP{} } func (mp mockPeer) RemoteAddr() net.Addr { return &net.TCPAddr{IP: mp.RemoteIP(), Port: 8800} } @@ -45,8 +48,8 @@ func (mp mockPeer) IsOutbound() bool { return true } func (mp mockPeer) IsPersistent() bool { return true } func (mp mockPeer) CloseConn() error { return nil } -func (mp mockPeer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ +func (mp mockPeer) NodeInfo() p2ptypes.NodeInfo { + return p2ptypes.NodeInfo{ NodeID: "", ListenAddr: "", } @@ -62,7 +65,7 @@ func (mp mockPeer) Get(string) interface{} { return struct{}{} } //nolint:unused type mockBlockStore struct { - blocks map[int64]*types.Block + blocks map[int64]*block.Block } //nolint:unused @@ -71,12 +74,12 @@ func (ml *mockBlockStore) Height() int64 { } //nolint:unused -func (ml *mockBlockStore) LoadBlock(height int64) *types.Block { +func (ml *mockBlockStore) LoadBlock(height int64) *block.Block { return ml.blocks[height] } //nolint:unused -func (ml *mockBlockStore) SaveBlock(block *types.Block, part *types.PartSet, commit *types.Commit) { +func (ml *mockBlockStore) SaveBlock(block *block.Block, part *metadata.PartSet, commit *metadata.Commit) { ml.blocks[block.Height] = block } @@ -85,7 +88,7 @@ type mockBlockApplier struct { // XXX: Add whitelist/blacklist? 
func (mba *mockBlockApplier) ApplyBlock( - state sm.State, blockID types.BlockID, block *types.Block, + state sm.State, blockID metadata.BlockID, block *block.Block, ) (sm.State, error) { state.LastBlockHeight++ return state, nil @@ -113,7 +116,7 @@ func (sio *mockSwitchIo) sendStatusResponse(_, _ int64, _ p2p.Peer) error { return nil } -func (sio *mockSwitchIo) sendBlockToPeer(_ *types.Block, _ p2p.Peer) error { +func (sio *mockSwitchIo) sendBlockToPeer(_ *block.Block, _ p2p.Peer) error { sio.mtx.Lock() defer sio.mtx.Unlock() sio.numBlockResponse++ @@ -147,8 +150,8 @@ func (sio *mockSwitchIo) sendStatusRequest(_ p2p.Peer) error { type testReactorParams struct { logger log.Logger - genDoc *types.GenesisDoc - privVals []types.PrivValidator + genDoc *consensus.GenesisDoc + privVals []consensus.PrivValidator startHeight int64 mockA bool } @@ -419,7 +422,7 @@ func TestReactorHelperMode(t *testing.T) { msgBz, err := proto.Marshal(msgProto) require.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) + reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz) assert.Equal(t, old+1, mockSwitch.numStatusResponse) case bcproto.BlockRequest: if ev.Height > params.startHeight { @@ -431,7 +434,7 @@ func TestReactorHelperMode(t *testing.T) { msgBz, err := proto.Marshal(msgProto) require.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) + reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz) assert.Equal(t, old+1, mockSwitch.numNoBlockResponse) } else { old := mockSwitch.numBlockResponse @@ -442,7 +445,7 @@ func TestReactorHelperMode(t *testing.T) { msgBz, err := proto.Marshal(msgProto) require.NoError(t, err) - reactor.Receive(channelID, mockPeer{id: types.NodeID(step.peer)}, msgBz) + reactor.Receive(channelID, mockPeer{id: p2ptypes.NodeID(step.peer)}, msgBz) assert.Equal(t, old+1, mockSwitch.numBlockResponse) } } @@ -475,8 +478,8 @@ type testApp struct { func newReactorStore( t *testing.T, - genDoc *types.GenesisDoc, - privVals []types.PrivValidator, + genDoc *consensus.GenesisDoc, + privVals []consensus.PrivValidator, maxBlockHeight int64) (*tmstore.BlockStore, sm.State, *sm.BlockExecutor) { t.Helper() @@ -502,7 +505,7 @@ func newReactorStore( // add blocks in for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - lastCommit := types.NewCommit(blockHeight-1, 0, types.BlockID{}, nil) + lastCommit := metadata.NewCommit(blockHeight-1, 0, metadata.BlockID{}, nil) if blockHeight > 1 { lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) lastBlock := blockStore.LoadBlock(blockHeight - 1) @@ -514,14 +517,14 @@ func newReactorStore( time.Now(), ) require.NoError(t, err) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) + lastCommit = metadata.NewCommit(vote.Height, vote.Round, + lastBlockMeta.BlockID, []metadata.CommitSig{vote.CommitSig()}) } thisBlock := sf.MakeBlock(state, blockHeight, lastCommit) - thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) - blockID := types.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} + thisParts := thisBlock.MakePartSet(metadata.BlockPartSizeBytes) + blockID := metadata.BlockID{Hash: thisBlock.Hash(), PartSetHeader: thisParts.Header()} state, err = blockExec.ApplyBlock(state, blockID, thisBlock) require.NoError(t, err) diff --git a/internal/blocksync/v2/scheduler.go b/internal/blocksync/v2/scheduler.go index b731d96a4..350e78cd6 
100644 --- a/internal/blocksync/v2/scheduler.go +++ b/internal/blocksync/v2/scheduler.go @@ -8,7 +8,8 @@ import ( "sort" "time" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/p2p" ) // Events generated by the scheduler: @@ -25,7 +26,7 @@ func (e scFinishedEv) String() string { // send a blockRequest message type scBlockRequest struct { priorityNormal - peerID types.NodeID + peerID p2p.NodeID height int64 } @@ -36,8 +37,8 @@ func (e scBlockRequest) String() string { // a block has been received and validated by the scheduler type scBlockReceived struct { priorityNormal - peerID types.NodeID - block *types.Block + peerID p2p.NodeID + block *block.Block } func (e scBlockReceived) String() string { @@ -47,7 +48,7 @@ func (e scBlockReceived) String() string { // scheduler detected a peer error type scPeerError struct { priorityHigh - peerID types.NodeID + peerID p2p.NodeID reason error } @@ -58,7 +59,7 @@ func (e scPeerError) String() string { // scheduler removed a set of peers (timed out or slow peer) type scPeersPruned struct { priorityHigh - peers []types.NodeID + peers []p2p.NodeID } func (e scPeersPruned) String() string { @@ -125,7 +126,7 @@ func (e peerState) String() string { } type scPeer struct { - peerID types.NodeID + peerID p2p.NodeID // initialized as New when peer is added, updated to Ready when statusUpdate is received, // updated to Removed when peer is removed @@ -142,7 +143,7 @@ func (p scPeer) String() string { p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) } -func newScPeer(peerID types.NodeID) *scPeer { +func newScPeer(peerID p2p.NodeID) *scPeer { return &scPeer{ peerID: peerID, state: peerStateNew, @@ -170,7 +171,7 @@ type scheduler struct { // a map of peerID to scheduler specific peer struct `scPeer` used to keep // track of peer specific state - peers map[types.NodeID]*scPeer + peers map[p2p.NodeID]*scPeer peerTimeout time.Duration // maximum response time from a peer otherwise prune minRecvRate int64 // minimum receive rate from peer otherwise prune @@ -182,13 +183,13 @@ type scheduler struct { blockStates map[int64]blockState // a map of heights to the peer we are waiting a response from - pendingBlocks map[int64]types.NodeID + pendingBlocks map[int64]p2p.NodeID // the time at which a block was put in blockStatePending pendingTime map[int64]time.Time // a map of heights to the peers that put the block in blockStateReceived - receivedBlocks map[int64]types.NodeID + receivedBlocks map[int64]p2p.NodeID } func (sc scheduler) String() string { @@ -203,10 +204,10 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler { syncTimeout: 60 * time.Second, height: initHeight, blockStates: make(map[int64]blockState), - peers: make(map[types.NodeID]*scPeer), - pendingBlocks: make(map[int64]types.NodeID), + peers: make(map[p2p.NodeID]*scPeer), + pendingBlocks: make(map[int64]p2p.NodeID), pendingTime: make(map[int64]time.Time), - receivedBlocks: make(map[int64]types.NodeID), + receivedBlocks: make(map[int64]p2p.NodeID), targetPending: 10, // TODO - pass as param peerTimeout: 15 * time.Second, // TODO - pass as param minRecvRate: 0, // int64(7680), TODO - pass as param @@ -215,14 +216,14 @@ func newScheduler(initHeight int64, startTime time.Time) *scheduler { return &sc } -func (sc *scheduler) ensurePeer(peerID types.NodeID) *scPeer { +func (sc *scheduler) ensurePeer(peerID p2p.NodeID) *scPeer { if _, ok := sc.peers[peerID]; !ok { sc.peers[peerID] = newScPeer(peerID) } 
return sc.peers[peerID] } -func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error { +func (sc *scheduler) touchPeer(peerID p2p.NodeID, time time.Time) error { peer, ok := sc.peers[peerID] if !ok { return fmt.Errorf("couldn't find peer %s", peerID) @@ -237,7 +238,7 @@ func (sc *scheduler) touchPeer(peerID types.NodeID, time time.Time) error { return nil } -func (sc *scheduler) removePeer(peerID types.NodeID) { +func (sc *scheduler) removePeer(peerID p2p.NodeID) { peer, ok := sc.peers[peerID] if !ok { return @@ -297,7 +298,7 @@ func (sc *scheduler) addNewBlocks() { } } -func (sc *scheduler) setPeerRange(peerID types.NodeID, base int64, height int64) error { +func (sc *scheduler) setPeerRange(peerID p2p.NodeID, base int64, height int64) error { peer := sc.ensurePeer(peerID) if peer.state == peerStateRemoved { @@ -332,8 +333,8 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState { } } -func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID { - peers := make([]types.NodeID, 0) +func (sc *scheduler) getPeersWithHeight(height int64) []p2p.NodeID { + peers := make([]p2p.NodeID, 0) for _, peer := range sc.peers { if peer.state != peerStateReady { continue @@ -345,8 +346,8 @@ func (sc *scheduler) getPeersWithHeight(height int64) []types.NodeID { return peers } -func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []types.NodeID { - prunable := make([]types.NodeID, 0) +func (sc *scheduler) prunablePeers(peerTimout time.Duration, minRecvRate int64, now time.Time) []p2p.NodeID { + prunable := make([]p2p.NodeID, 0) for peerID, peer := range sc.peers { if peer.state != peerStateReady { continue @@ -365,7 +366,7 @@ func (sc *scheduler) setStateAtHeight(height int64, state blockState) { } // CONTRACT: peer exists and in Ready state. -func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, now time.Time) error { +func (sc *scheduler) markReceived(peerID p2p.NodeID, height int64, size int64, now time.Time) error { peer := sc.peers[peerID] if state := sc.getStateAtHeight(height); state != blockStatePending || sc.pendingBlocks[height] != peerID { @@ -389,7 +390,7 @@ func (sc *scheduler) markReceived(peerID types.NodeID, height int64, size int64, return nil } -func (sc *scheduler) markPending(peerID types.NodeID, height int64, time time.Time) error { +func (sc *scheduler) markPending(peerID p2p.NodeID, height int64, time time.Time) error { state := sc.getStateAtHeight(height) if state != blockStateNew { return fmt.Errorf("block %d should be in blockStateNew but is %s", height, state) @@ -471,7 +472,7 @@ func (sc *scheduler) nextHeightToSchedule() int64 { return min } -func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 { +func (sc *scheduler) pendingFrom(peerID p2p.NodeID) []int64 { var heights []int64 for height, pendingPeerID := range sc.pendingBlocks { if pendingPeerID == peerID { @@ -481,7 +482,7 @@ func (sc *scheduler) pendingFrom(peerID types.NodeID) []int64 { return heights } -func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) { +func (sc *scheduler) selectPeer(height int64) (p2p.NodeID, error) { peers := sc.getPeersWithHeight(height) if len(peers) == 0 { return "", fmt.Errorf("cannot find peer for height %d", height) @@ -489,7 +490,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) { // create a map from number of pending requests to a list // of peers having that number of pending requests. 
- pendingFrom := make(map[int][]types.NodeID) + pendingFrom := make(map[int][]p2p.NodeID) for _, peerID := range peers { numPending := len(sc.pendingFrom(peerID)) pendingFrom[numPending] = append(pendingFrom[numPending], peerID) @@ -508,7 +509,7 @@ func (sc *scheduler) selectPeer(height int64) (types.NodeID, error) { } // PeerByID is a list of peers sorted by peerID. -type PeerByID []types.NodeID +type PeerByID []p2p.NodeID func (peers PeerByID) Len() int { return len(peers) diff --git a/internal/blocksync/v2/scheduler_test.go b/internal/blocksync/v2/scheduler_test.go index 91fac3637..84a455659 100644 --- a/internal/blocksync/v2/scheduler_test.go +++ b/internal/blocksync/v2/scheduler_test.go @@ -10,8 +10,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) type scTestParams struct { @@ -19,9 +21,9 @@ type scTestParams struct { initHeight int64 height int64 allB []int64 - pending map[int64]types.NodeID + pending map[int64]p2p.NodeID pendingTime map[int64]time.Time - received map[int64]types.NodeID + received map[int64]p2p.NodeID peerTimeout time.Duration minRecvRate int64 targetPending int @@ -40,7 +42,7 @@ func verifyScheduler(sc *scheduler) { } func newTestScheduler(params scTestParams) *scheduler { - peers := make(map[types.NodeID]*scPeer) + peers := make(map[p2p.NodeID]*scPeer) var maxHeight int64 initHeight := params.initHeight @@ -53,8 +55,8 @@ func newTestScheduler(params scTestParams) *scheduler { } for id, peer := range params.peers { - peer.peerID = types.NodeID(id) - peers[types.NodeID(id)] = peer + peer.peerID = p2p.NodeID(id) + peers[p2p.NodeID(id)] = peer if maxHeight < peer.height { maxHeight = peer.height } @@ -121,7 +123,7 @@ func TestScMaxHeights(t *testing.T) { name: "one ready peer", sc: scheduler{ height: 3, - peers: map[types.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}}, + peers: map[p2p.NodeID]*scPeer{"P1": {height: 6, state: peerStateReady}}, }, wantMax: 6, }, @@ -129,7 +131,7 @@ func TestScMaxHeights(t *testing.T) { name: "ready and removed peers", sc: scheduler{ height: 1, - peers: map[types.NodeID]*scPeer{ + peers: map[p2p.NodeID]*scPeer{ "P1": {height: 4, state: peerStateReady}, "P2": {height: 10, state: peerStateRemoved}}, }, @@ -139,7 +141,7 @@ func TestScMaxHeights(t *testing.T) { name: "removed peers", sc: scheduler{ height: 1, - peers: map[types.NodeID]*scPeer{ + peers: map[p2p.NodeID]*scPeer{ "P1": {height: 4, state: peerStateRemoved}, "P2": {height: 10, state: peerStateRemoved}}, }, @@ -149,7 +151,7 @@ func TestScMaxHeights(t *testing.T) { name: "new peers", sc: scheduler{ height: 1, - peers: map[types.NodeID]*scPeer{ + peers: map[p2p.NodeID]*scPeer{ "P1": {base: -1, height: -1, state: peerStateNew}, "P2": {base: -1, height: -1, state: peerStateNew}}, }, @@ -159,7 +161,7 @@ func TestScMaxHeights(t *testing.T) { name: "mixed peers", sc: scheduler{ height: 1, - peers: map[types.NodeID]*scPeer{ + peers: map[p2p.NodeID]*scPeer{ "P1": {height: -1, state: peerStateNew}, "P2": {height: 10, state: peerStateReady}, "P3": {height: 20, state: peerStateRemoved}, @@ -186,7 +188,7 @@ func TestScMaxHeights(t *testing.T) { func TestScEnsurePeer(t *testing.T) { type args struct { - peerID types.NodeID + peerID p2p.NodeID } tests := []struct { name string @@ -243,7 +245,7 @@ func TestScTouchPeer(t 
*testing.T) { now := time.Now() type args struct { - peerID types.NodeID + peerID p2p.NodeID time time.Time } @@ -315,13 +317,13 @@ func TestScPrunablePeers(t *testing.T) { name string fields scTestParams args args - wantResult []types.NodeID + wantResult []p2p.NodeID }{ { name: "no peers", fields: scTestParams{peers: map[string]*scPeer{}}, args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "mixed peers", @@ -340,7 +342,7 @@ func TestScPrunablePeers(t *testing.T) { "P6": {state: peerStateReady, lastTouched: now.Add(time.Second), lastRate: 90}, }}, args: args{threshold: time.Second, time: now.Add(time.Second + time.Millisecond), minSpeed: 100}, - wantResult: []types.NodeID{"P4", "P5", "P6"}, + wantResult: []p2p.NodeID{"P4", "P5", "P6"}, }, } @@ -360,7 +362,7 @@ func TestScPrunablePeers(t *testing.T) { func TestScRemovePeer(t *testing.T) { type args struct { - peerID types.NodeID + peerID p2p.NodeID } tests := []struct { name string @@ -423,13 +425,13 @@ func TestScRemovePeer(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1"}, }, args: args{peerID: "P1"}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, allB: []int64{}, - pending: map[int64]types.NodeID{}, + pending: map[int64]p2p.NodeID{}, }, }, { @@ -437,13 +439,13 @@ func TestScRemovePeer(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1"}, + received: map[int64]p2p.NodeID{1: "P1"}, }, args: args{peerID: "P1"}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 3, state: peerStateRemoved}}, allB: []int64{}, - received: map[int64]types.NodeID{}, + received: map[int64]p2p.NodeID{}, }, }, { @@ -451,15 +453,15 @@ func TestScRemovePeer(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 3: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 3: "P1"}, + received: map[int64]p2p.NodeID{2: "P1", 4: "P1"}, }, args: args{peerID: "P1"}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}, allB: []int64{}, - pending: map[int64]types.NodeID{}, - received: map[int64]types.NodeID{}, + pending: map[int64]p2p.NodeID{}, + received: map[int64]p2p.NodeID{}, }, }, { @@ -470,8 +472,8 @@ func TestScRemovePeer(t *testing.T) { "P2": {height: 6, state: peerStateReady}, }, allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{1: "P1", 3: "P2", 6: "P1"}, - received: map[int64]types.NodeID{2: "P1", 4: "P2", 5: "P2"}, + pending: map[int64]p2p.NodeID{1: "P1", 3: "P2", 6: "P1"}, + received: map[int64]p2p.NodeID{2: "P1", 4: "P2", 5: "P2"}, }, args: args{peerID: "P1"}, wantFields: scTestParams{ @@ -480,8 +482,8 @@ func TestScRemovePeer(t *testing.T) { "P2": {height: 6, state: peerStateReady}, }, allB: []int64{1, 2, 3, 4, 5, 6}, - pending: map[int64]types.NodeID{3: "P2"}, - received: map[int64]types.NodeID{4: "P2", 5: "P2"}, + pending: map[int64]p2p.NodeID{3: "P2"}, + received: map[int64]p2p.NodeID{4: "P2", 5: "P2"}, }, }, } @@ -500,7 +502,7 @@ func TestScRemovePeer(t *testing.T) { 
func TestScSetPeerRange(t *testing.T) { type args struct { - peerID types.NodeID + peerID p2p.NodeID base int64 height int64 } @@ -621,25 +623,25 @@ func TestScGetPeersWithHeight(t *testing.T) { name string fields scTestParams args args - wantResult []types.NodeID + wantResult []p2p.NodeID }{ { name: "no peers", fields: scTestParams{peers: map[string]*scPeer{}}, args: args{height: 10}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "only new peers", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, args: args{height: 10}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "only Removed peers", fields: scTestParams{peers: map[string]*scPeer{"P1": {height: 4, state: peerStateRemoved}}}, args: args{height: 2}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "one Ready shorter peer", @@ -648,7 +650,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{1, 2, 3, 4}, }, args: args{height: 5}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "one Ready equal peer", @@ -657,7 +659,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{1, 2, 3, 4}, }, args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, + wantResult: []p2p.NodeID{"P1"}, }, { name: "one Ready higher peer", @@ -667,7 +669,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{1, 2, 3, 4}, }, args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, + wantResult: []p2p.NodeID{"P1"}, }, { name: "one Ready higher peer at base", @@ -677,7 +679,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{1, 2, 3, 4}, }, args: args{height: 4}, - wantResult: []types.NodeID{"P1"}, + wantResult: []p2p.NodeID{"P1"}, }, { name: "one Ready higher peer with higher base", @@ -687,7 +689,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{1, 2, 3, 4}, }, args: args{height: 4}, - wantResult: []types.NodeID{}, + wantResult: []p2p.NodeID{}, }, { name: "multiple mixed peers", @@ -702,7 +704,7 @@ func TestScGetPeersWithHeight(t *testing.T) { allB: []int64{8, 9, 10, 11}, }, args: args{height: 8}, - wantResult: []types.NodeID{"P2", "P5"}, + wantResult: []p2p.NodeID{"P2", "P5"}, }, } @@ -724,7 +726,7 @@ func TestScMarkPending(t *testing.T) { now := time.Now() type args struct { - peerID types.NodeID + peerID p2p.NodeID height int64 tm time.Time } @@ -820,14 +822,14 @@ func TestScMarkPending(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1"}, pendingTime: map[int64]time.Time{1: now}, }, args: args{peerID: "P1", height: 2, tm: now.Add(time.Millisecond)}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Millisecond)}, }, }, @@ -850,7 +852,7 @@ func TestScMarkReceived(t *testing.T) { now := time.Now() type args struct { - peerID types.NodeID + peerID p2p.NodeID height int64 size int64 tm time.Time @@ -890,7 +892,7 @@ func TestScMarkReceived(t *testing.T) { "P2": {height: 4, state: peerStateReady}, }, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, }, args: args{peerID: 
"P1", height: 2, size: 1000, tm: now}, wantFields: scTestParams{ @@ -899,7 +901,7 @@ func TestScMarkReceived(t *testing.T) { "P2": {height: 4, state: peerStateReady}, }, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P2", 3: "P2", 4: "P1"}, }, wantErr: true, }, @@ -908,13 +910,13 @@ func TestScMarkReceived(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, + pending: map[int64]p2p.NodeID{}, }, args: args{peerID: "P1", height: 2, size: 1000, tm: now}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{}, + pending: map[int64]p2p.NodeID{}, }, wantErr: true, }, @@ -923,14 +925,14 @@ func TestScMarkReceived(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, }, args: args{peerID: "P1", height: 2, size: 1000, tm: now}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now.Add(time.Second)}, }, wantErr: true, @@ -940,16 +942,16 @@ func TestScMarkReceived(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now}, }, args: args{peerID: "P1", height: 2, size: 1000, tm: now.Add(time.Millisecond)}, wantFields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{1: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1"}, pendingTime: map[int64]time.Time{1: now}, - received: map[int64]types.NodeID{2: "P1"}, + received: map[int64]p2p.NodeID{2: "P1"}, }, }, } @@ -990,7 +992,7 @@ func TestScMarkProcessed(t *testing.T) { height: 2, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1"}, pendingTime: map[int64]time.Time{2: now}, targetPending: 1, }, @@ -1008,15 +1010,15 @@ func TestScMarkProcessed(t *testing.T) { height: 1, peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}, - pending: map[int64]types.NodeID{2: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1"}, pendingTime: map[int64]time.Time{2: now}, - received: map[int64]types.NodeID{1: "P1"}}, + received: map[int64]p2p.NodeID{1: "P1"}}, args: args{height: 1}, wantFields: scTestParams{ height: 2, peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{2}, - pending: map[int64]types.NodeID{2: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1"}, pendingTime: map[int64]time.Time{2: now}}, }, } @@ -1100,7 +1102,7 @@ func TestScAllBlocksProcessed(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: 
"P1", 3: "P1", 4: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, }, wantResult: false, @@ -1110,7 +1112,7 @@ func TestScAllBlocksProcessed(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, + received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, }, wantResult: false, }, @@ -1121,7 +1123,7 @@ func TestScAllBlocksProcessed(t *testing.T) { peers: map[string]*scPeer{ "P1": {height: 4, state: peerStateReady}}, allB: []int64{4}, - received: map[int64]types.NodeID{4: "P1"}, + received: map[int64]p2p.NodeID{4: "P1"}, }, wantResult: true, }, @@ -1130,7 +1132,7 @@ func TestScAllBlocksProcessed(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1", 4: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1", 4: "P1"}, pendingTime: map[int64]time.Time{2: now, 4: now}, }, wantResult: false, @@ -1178,7 +1180,7 @@ func TestScNextHeightToSchedule(t *testing.T) { initHeight: 1, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, pendingTime: map[int64]time.Time{1: now, 2: now, 3: now, 4: now}, }, wantHeight: -1, @@ -1189,7 +1191,7 @@ func TestScNextHeightToSchedule(t *testing.T) { initHeight: 1, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, + received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1", 4: "P1"}, }, wantHeight: -1, }, @@ -1208,7 +1210,7 @@ func TestScNextHeightToSchedule(t *testing.T) { initHeight: 1, peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}, - pending: map[int64]types.NodeID{2: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1"}, pendingTime: map[int64]time.Time{2: now}, }, wantHeight: 1, @@ -1238,7 +1240,7 @@ func TestScSelectPeer(t *testing.T) { name string fields scTestParams args args - wantResult types.NodeID + wantResult p2p.NodeID wantError bool }{ { @@ -1306,7 +1308,7 @@ func TestScSelectPeer(t *testing.T) { "P1": {height: 8, state: peerStateReady}, "P2": {height: 9, state: peerStateReady}}, allB: []int64{4, 5, 6, 7, 8, 9}, - pending: map[int64]types.NodeID{ + pending: map[int64]p2p.NodeID{ 4: "P1", 6: "P1", 5: "P2", }, @@ -1322,7 +1324,7 @@ func TestScSelectPeer(t *testing.T) { "P1": {height: 15, state: peerStateReady}, "P3": {height: 15, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, - pending: map[int64]types.NodeID{ + pending: map[int64]p2p.NodeID{ 1: "P1", 2: "P1", 3: "P3", 4: "P3", 5: "P2", 6: "P2", @@ -1348,8 +1350,8 @@ func TestScSelectPeer(t *testing.T) { } // makeScBlock makes an empty block. 
-func makeScBlock(height int64) *types.Block { - return &types.Block{Header: types.Header{Height: height}} +func makeScBlock(height int64) *block.Block { + return &block.Block{Header: metadata.Header{Height: height}} } // used in place of assert.Equal(t, want, actual) to avoid failures due to @@ -1391,7 +1393,7 @@ func TestScHandleBlockResponse(t *testing.T) { now := time.Now() block6FromP1 := bcBlockResponse{ time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), + peerID: p2p.NodeID("P1"), size: 100, block: makeScBlock(6), } @@ -1432,7 +1434,7 @@ func TestScHandleBlockResponse(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, + pending: map[int64]p2p.NodeID{6: "P2"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: block6FromP1}, @@ -1443,7 +1445,7 @@ func TestScHandleBlockResponse(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now.Add(time.Second)}, }, args: args{event: block6FromP1}, @@ -1454,7 +1456,7 @@ func TestScHandleBlockResponse(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: block6FromP1}, @@ -1476,7 +1478,7 @@ func TestScHandleNoBlockResponse(t *testing.T) { now := time.Now() noBlock6FromP1 := bcNoBlockResponse{ time: now.Add(time.Millisecond), - peerID: types.NodeID("P1"), + peerID: p2p.NodeID("P1"), height: 6, } @@ -1512,14 +1514,14 @@ func TestScHandleNoBlockResponse(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, + pending: map[int64]p2p.NodeID{6: "P2"}, pendingTime: map[int64]time.Time{6: now}, }, wantEvent: noOpEvent{}, wantFields: scTestParams{ peers: map[string]*scPeer{"P2": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P2"}, + pending: map[int64]p2p.NodeID{6: "P2"}, pendingTime: map[int64]time.Time{6: now}, }, }, @@ -1528,7 +1530,7 @@ func TestScHandleNoBlockResponse(t *testing.T) { fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, wantEvent: scPeerError{peerID: "P1", reason: fmt.Errorf("some error")}, @@ -1551,7 +1553,7 @@ func TestScHandleNoBlockResponse(t *testing.T) { func TestScHandleBlockProcessed(t *testing.T) { now := time.Now() processed6FromP1 := pcBlockProcessed{ - peerID: types.NodeID("P1"), + peerID: p2p.NodeID("P1"), height: 6, } @@ -1578,7 +1580,7 @@ func TestScHandleBlockProcessed(t *testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: processed6FromP1}, @@ -1590,7 +1592,7 @@ func TestScHandleBlockProcessed(t 
*testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, + received: map[int64]p2p.NodeID{6: "P1", 7: "P1"}, }, args: args{event: processed6FromP1}, wantEvent: scFinishedEv{}, @@ -1601,8 +1603,8 @@ func TestScHandleBlockProcessed(t *testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"}, + received: map[int64]p2p.NodeID{6: "P1"}, }, args: args{event: processed6FromP1}, wantEvent: noOpEvent{}, @@ -1645,7 +1647,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, @@ -1657,7 +1659,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, allB: []int64{6, 7, 8}, - pending: map[int64]types.NodeID{6: "P1"}, + pending: map[int64]p2p.NodeID{6: "P1"}, pendingTime: map[int64]time.Time{6: now}, }, args: args{event: pcBlockVerificationFailure{height: 10, firstPeerID: "P1", secondPeerID: "P1"}}, @@ -1669,7 +1671,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { initHeight: 6, peers: map[string]*scPeer{"P1": {height: 7, state: peerStateReady}}, allB: []int64{6, 7}, - received: map[int64]types.NodeID{6: "P1", 7: "P1"}, + received: map[int64]p2p.NodeID{6: "P1", 7: "P1"}, }, args: args{event: pcBlockVerificationFailure{height: 7, firstPeerID: "P1", secondPeerID: "P1"}}, wantEvent: scFinishedEv{}, @@ -1680,8 +1682,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { initHeight: 5, peers: map[string]*scPeer{"P1": {height: 8, state: peerStateReady}, "P2": {height: 8, state: peerStateReady}}, allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, + pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"}, + received: map[int64]p2p.NodeID{5: "P1", 6: "P1"}, }, args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P1"}}, wantEvent: noOpEvent{}, @@ -1696,8 +1698,8 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { "P3": {height: 8, state: peerStateReady}, }, allB: []int64{5, 6, 7, 8}, - pending: map[int64]types.NodeID{7: "P1", 8: "P1"}, - received: map[int64]types.NodeID{5: "P1", 6: "P1"}, + pending: map[int64]p2p.NodeID{7: "P1", 8: "P1"}, + received: map[int64]p2p.NodeID{5: "P1", 6: "P1"}, }, args: args{event: pcBlockVerificationFailure{height: 5, firstPeerID: "P1", secondPeerID: "P2"}}, wantEvent: noOpEvent{}, @@ -1716,7 +1718,7 @@ func TestScHandleBlockVerificationFailure(t *testing.T) { func TestScHandleAddNewPeer(t *testing.T) { addP1 := bcAddNewPeer{ - peerID: types.NodeID("P1"), + peerID: p2p.NodeID("P1"), } type args struct { event bcAddNewPeer @@ -1827,7 +1829,7 @@ func TestScHandleTryPrunePeer(t *testing.T) { allB: []int64{1, 2, 3, 4, 5, 6, 7}, peerTimeout: time.Second}, args: args{event: pruneEv}, - wantEvent: scPeersPruned{peers: []types.NodeID{"P4", "P5", "P6"}}, + 
wantEvent: scPeersPruned{peers: []p2p.NodeID{"P4", "P5", "P6"}}, }, { name: "mixed peers, finish after pruning", @@ -1925,7 +1927,7 @@ func TestScHandleTrySchedule(t *testing.T) { "P1": {height: 4, state: peerStateReady}, "P2": {height: 5, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5}, - pending: map[int64]types.NodeID{ + pending: map[int64]p2p.NodeID{ 1: "P1", 2: "P1", 3: "P2", }, @@ -1943,7 +1945,7 @@ func TestScHandleTrySchedule(t *testing.T) { "P1": {height: 8, state: peerStateReady}, "P3": {height: 8, state: peerStateReady}}, allB: []int64{1, 2, 3, 4, 5, 6, 7, 8}, - pending: map[int64]types.NodeID{ + pending: map[int64]p2p.NodeID{ 1: "P1", 2: "P1", 3: "P3", 4: "P3", 5: "P2", 6: "P2", @@ -2105,7 +2107,7 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1"}, pendingTime: map[int64]time.Time{1: tick[1]}, height: 1, }, @@ -2117,7 +2119,7 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2]}, height: 1, }, @@ -2129,7 +2131,7 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, + pending: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"}, pendingTime: map[int64]time.Time{1: tick[1], 2: tick[2], 3: tick[3]}, height: 1, }, @@ -2141,9 +2143,9 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[4]}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{2: "P1", 3: "P1"}, + pending: map[int64]p2p.NodeID{2: "P1", 3: "P1"}, pendingTime: map[int64]time.Time{2: tick[2], 3: tick[3]}, - received: map[int64]types.NodeID{1: "P1"}, + received: map[int64]p2p.NodeID{1: "P1"}, height: 1, }, }, @@ -2154,9 +2156,9 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[5]}}, allB: []int64{1, 2, 3}, - pending: map[int64]types.NodeID{3: "P1"}, + pending: map[int64]p2p.NodeID{3: "P1"}, pendingTime: map[int64]time.Time{3: tick[3]}, - received: map[int64]types.NodeID{1: "P1", 2: "P1"}, + received: map[int64]p2p.NodeID{1: "P1", 2: "P1"}, height: 1, }, }, @@ -2167,29 +2169,29 @@ func TestScHandle(t *testing.T) { startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, + received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"}, height: 1, }, }, { // processed block 1 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 1}}, + args: args{event: pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 1}}, wantEvent: noOpEvent{}, wantSc: &scTestParams{ startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, allB: []int64{2, 3}, - received: map[int64]types.NodeID{2: "P1", 3: "P1"}, + received: map[int64]p2p.NodeID{2: "P1", 3: "P1"}, height: 2, }, }, { // processed block 2 - args: args{event: pcBlockProcessed{peerID: types.NodeID("P1"), height: 2}}, + args: args{event: 
pcBlockProcessed{peerID: p2p.NodeID("P1"), height: 2}}, wantEvent: scFinishedEv{}, wantSc: &scTestParams{ startTime: now, peers: map[string]*scPeer{"P1": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, allB: []int64{3}, - received: map[int64]types.NodeID{3: "P1"}, + received: map[int64]p2p.NodeID{3: "P1"}, height: 3, }, }, @@ -2205,7 +2207,7 @@ func TestScHandle(t *testing.T) { "P1": {height: 4, state: peerStateReady, lastTouched: tick[6]}, "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, allB: []int64{1, 2, 3, 4}, - received: map[int64]types.NodeID{1: "P1", 2: "P1", 3: "P1"}, + received: map[int64]p2p.NodeID{1: "P1", 2: "P1", 3: "P1"}, height: 1, }, args: args{event: pcBlockVerificationFailure{height: 1, firstPeerID: "P1", secondPeerID: "P1"}}, @@ -2216,7 +2218,7 @@ func TestScHandle(t *testing.T) { "P1": {height: 4, state: peerStateRemoved, lastTouched: tick[6]}, "P2": {height: 3, state: peerStateReady, lastTouched: tick[6]}}, allB: []int64{1, 2, 3}, - received: map[int64]types.NodeID{}, + received: map[int64]p2p.NodeID{}, height: 1, }, }, diff --git a/internal/consensus/byzantine_test.go b/internal/consensus/byzantine_test.go index 1c6ec858b..971b1d843 100644 --- a/internal/consensus/byzantine_test.go +++ b/internal/consensus/byzantine_test.go @@ -11,18 +11,22 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abcicli "github.com/tendermint/tendermint/abci/client" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/evidence" tmsync "github.com/tendermint/tendermint/internal/libs/sync" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + evtypes "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" ) @@ -54,7 +58,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal app := appFunc() - vals := types.TM2PB.ValidatorUpdates(state.Validators) + vals := consensus.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) blockDB := dbm.NewMemDB() @@ -85,7 +89,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { pv := privVals[i] cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err = eventBus.Start() require.NoError(t, err) @@ -100,7 +104,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { rts := setup(t, nValidators, states, 100) // buffer must be large enough to not deadlock - var bzNodeID types.NodeID + var bzNodeID p2ptypes.NodeID // Set the first state's reactor as the dedicated byzantine reactor and grab // the NodeID that corresponds to the state so we can reference the reactor. 
@@ -125,7 +129,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { ) require.NoError(t, err) - prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + prevote2, err := bzNodeState.signVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{}) require.NoError(t, err) // send two votes to all peers (1st to one half, 2nd to another half) @@ -167,12 +171,12 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { lazyNodeState.Logger.Info("Lazy Proposer proposing condensed commit") require.NotNil(t, lazyNodeState.privValidator) - var commit *types.Commit + var commit *metadata.Commit switch { case lazyNodeState.Height == lazyNodeState.state.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = types.NewCommit(0, 0, types.BlockID{}, nil) + commit = metadata.NewCommit(0, 0, metadata.BlockID{}, nil) case lazyNodeState.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = lazyNodeState.LastCommit.MakeCommit() @@ -182,7 +186,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } // omit the last signature in the commit - commit.Signatures[len(commit.Signatures)-1] = types.NewCommitSigAbsent() + commit.Signatures[len(commit.Signatures)-1] = metadata.NewCommitSigAbsent() if lazyNodeState.privValidatorPubKey == nil { // If this node is a validator & proposer in the current round, it will @@ -203,8 +207,8 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { } // Make proposal - propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal := types.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID) + propBlockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} + proposal := consensus.NewProposal(height, round, lazyNodeState.ValidRound, propBlockID) p := proposal.ToProto() if err := lazyNodeState.privValidator.SignProposal(context.Background(), lazyNodeState.state.ChainID, p); err == nil { proposal.Signature = p.Signature @@ -229,20 +233,20 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { // Evidence should be submitted and committed at the third height but // we will check the first six just in case - evidenceFromEachValidator := make([]types.Evidence, nValidators) + evidenceFromEachValidator := make([]evtypes.Evidence, nValidators) wg := new(sync.WaitGroup) i := 0 for _, sub := range rts.subs { wg.Add(1) - go func(j int, s types.Subscription) { + go func(j int, s events.Subscription) { defer wg.Done() for { select { case msg := <-s.Out(): require.NotNil(t, msg) - block := msg.Data().(types.EventDataNewBlock).Block + block := msg.Data().(events.EventDataNewBlock).Block if len(block.Evidence.Evidence) != 0 { evidenceFromEachValidator[j] = block.Evidence.Evidence[0] return @@ -264,7 +268,7 @@ func TestByzantinePrevoteEquivocation(t *testing.T) { for idx, ev := range evidenceFromEachValidator { if assert.NotNil(t, ev, idx) { - ev, ok := ev.(*types.DuplicateVoteEvidence) + ev, ok := ev.(*evtypes.DuplicateVoteEvidence) assert.True(t, ok) assert.Equal(t, pubkey.Address(), ev.VoteA.ValidatorAddress) assert.Equal(t, prevoteHeight, ev.Height()) diff --git a/internal/consensus/common_test.go b/internal/consensus/common_test.go index 17ba1ce2e..0c519035b 100644 --- a/internal/consensus/common_test.go +++ b/internal/consensus/common_test.go @@ -20,7 +20,6 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - abci 
"github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/internal/consensus/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" @@ -31,11 +30,15 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) const ( @@ -86,14 +89,14 @@ type validatorStub struct { Index int32 // Validator index. NOTE: we don't assume validator set changes. Height int64 Round int32 - types.PrivValidator + consensus.PrivValidator VotingPower int64 - lastVote *types.Vote + lastVote *consensus.Vote } const testMinPower int64 = 10 -func newValidatorStub(privValidator types.PrivValidator, valIndex int32) *validatorStub { +func newValidatorStub(privValidator consensus.PrivValidator, valIndex int32) *validatorStub { return &validatorStub{ Index: valIndex, PrivValidator: privValidator, @@ -105,21 +108,21 @@ func (vs *validatorStub) signVote( config *cfg.Config, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader) (*types.Vote, error) { + header metadata.PartSetHeader) (*consensus.Vote, error) { pubKey, err := vs.PrivValidator.GetPubKey(context.Background()) if err != nil { return nil, fmt.Errorf("can't get pubkey: %w", err) } - vote := &types.Vote{ + vote := &consensus.Vote{ ValidatorIndex: vs.Index, ValidatorAddress: pubKey.Address(), Height: vs.Height, Round: vs.Round, Timestamp: tmtime.Now(), Type: voteType, - BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() if err := vs.PrivValidator.SignVote(context.Background(), config.ChainID(), v); err != nil { @@ -144,7 +147,7 @@ func signVote( config *cfg.Config, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader) *types.Vote { + header metadata.PartSetHeader) *consensus.Vote { v, err := vs.signVote(config, voteType, hash, header) if err != nil { @@ -160,9 +163,9 @@ func signVotes( config *cfg.Config, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader, - vss ...*validatorStub) []*types.Vote { - votes := make([]*types.Vote, len(vss)) + header metadata.PartSetHeader, + vss ...*validatorStub) []*consensus.Vote { + votes := make([]*consensus.Vote, len(vss)) for i, vs := range vss { votes[i] = signVote(vs, config, voteType, hash, header) } @@ -225,7 +228,7 @@ func decideProposal( vs *validatorStub, height int64, round int32, -) (proposal *types.Proposal, block *types.Block) { +) (proposal *consensus.Proposal, block *block.Block) { cs1.mtx.Lock() block, blockParts := cs1.createProposalBlock() validRound := cs1.ValidRound @@ -236,8 +239,8 @@ func decideProposal( } // Make proposal - polRound, propBlockID := validRound, types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} - proposal = types.NewProposal(height, round, polRound, propBlockID) + polRound, propBlockID := validRound, metadata.BlockID{Hash: block.Hash(), PartSetHeader: 
blockParts.Header()} + proposal = consensus.NewProposal(height, round, polRound, propBlockID) p := proposal.ToProto() if err := vs.SignProposal(context.Background(), chainID, p); err != nil { panic(err) @@ -248,7 +251,7 @@ func decideProposal( return } -func addVotes(to *State, votes ...*types.Vote) { +func addVotes(to *State, votes ...*consensus.Vote) { for _, vote := range votes { to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}} } @@ -259,7 +262,7 @@ func signAddVotes( to *State, voteType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader, + header metadata.PartSetHeader, vss ...*validatorStub, ) { votes := signVotes(config, voteType, hash, header, vss...) @@ -271,7 +274,7 @@ func validatePrevote(t *testing.T, cs *State, round int32, privVal *validatorStu pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pubKey.Address() - var vote *types.Vote + var vote *consensus.Vote if vote = prevotes.GetByAddress(address); vote == nil { panic("Failed to find prevote from validator") } @@ -291,7 +294,7 @@ func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, bloc pv, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pv.Address() - var vote *types.Vote + var vote *consensus.Vote if vote = votes.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") } @@ -313,7 +316,7 @@ func validatePrecommit( pv, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) address := pv.Address() - var vote *types.Vote + var vote *consensus.Vote if vote = precommits.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") } @@ -366,14 +369,14 @@ func validatePrevoteAndPrecommit( } func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { - votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, types.EventQueryVote) + votesSub, err := cs.eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, events.EventQueryVote) if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) + panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, events.EventQueryVote)) } ch := make(chan tmpubsub.Message) go func() { for msg := range votesSub.Out() { - vote := msg.Data().(types.EventDataVote) + vote := msg.Data().(events.EventDataVote) // we only fire for our own votes if bytes.Equal(addr, vote.Vote.ValidatorAddress) { ch <- msg @@ -386,7 +389,7 @@ func subscribeToVoter(cs *State, addr []byte) <-chan tmpubsub.Message { //------------------------------------------------------------------------------- // consensus states -func newState(state sm.State, pv types.PrivValidator, app abci.Application) *State { +func newState(state sm.State, pv consensus.PrivValidator, app abci.Application) *State { config := cfg.ResetTestRoot("consensus_state_test") return newStateWithConfig(config, state, pv, app) } @@ -394,7 +397,7 @@ func newState(state sm.State, pv types.PrivValidator, app abci.Application) *Sta func newStateWithConfig( thisConfig *cfg.Config, state sm.State, - pv types.PrivValidator, + pv consensus.PrivValidator, app abci.Application, ) *State { blockStore := store.NewBlockStore(dbm.NewMemDB()) @@ -404,7 +407,7 @@ func newStateWithConfig( func newStateWithConfigAndBlockStore( thisConfig *cfg.Config, state sm.State, - pv types.PrivValidator, + pv consensus.PrivValidator, app abci.Application, blockStore *store.BlockStore, ) *State { @@ -434,7 
+437,7 @@ func newStateWithConfigAndBlockStore( cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err := eventBus.Start() if err != nil { @@ -519,7 +522,7 @@ func ensureNewEvent(ch <-chan tmpubsub.Message, height int64, round int32, timeo case <-time.After(timeout): panic(errorMessage) case msg := <-ch: - roundStateEvent, ok := msg.Data().(types.EventDataRoundState) + roundStateEvent, ok := msg.Data().(events.EventDataRoundState) if !ok { panic(fmt.Sprintf("expected a EventDataRoundState, got %T. Wrong subscription channel?", msg.Data())) @@ -539,7 +542,7 @@ func ensureNewRound(roundCh <-chan tmpubsub.Message, height int64, round int32) case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewRound event") case msg := <-roundCh: - newRoundEvent, ok := msg.Data().(types.EventDataNewRound) + newRoundEvent, ok := msg.Data().(events.EventDataNewRound) if !ok { panic(fmt.Sprintf("expected a EventDataNewRound, got %T. Wrong subscription channel?", msg.Data())) @@ -564,7 +567,7 @@ func ensureNewProposal(proposalCh <-chan tmpubsub.Message, height int64, round i case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewProposal event") case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + proposalEvent, ok := msg.Data().(events.EventDataCompleteProposal) if !ok { panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. Wrong subscription channel?", msg.Data())) @@ -588,7 +591,7 @@ func ensureNewBlock(blockCh <-chan tmpubsub.Message, height int64) { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewBlock event") case msg := <-blockCh: - blockEvent, ok := msg.Data().(types.EventDataNewBlock) + blockEvent, ok := msg.Data().(events.EventDataNewBlock) if !ok { panic(fmt.Sprintf("expected a EventDataNewBlock, got %T. Wrong subscription channel?", msg.Data())) @@ -604,7 +607,7 @@ func ensureNewBlockHeader(blockCh <-chan tmpubsub.Message, height int64, blockHa case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewBlockHeader event") case msg := <-blockCh: - blockHeaderEvent, ok := msg.Data().(types.EventDataNewBlockHeader) + blockHeaderEvent, ok := msg.Data().(events.EventDataNewBlockHeader) if !ok { panic(fmt.Sprintf("expected a EventDataNewBlockHeader, got %T. Wrong subscription channel?", msg.Data())) @@ -623,12 +626,12 @@ func ensureNewUnlock(unlockCh <-chan tmpubsub.Message, height int64, round int32 "Timeout expired while waiting for NewUnlock event") } -func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID types.BlockID) { +func ensureProposal(proposalCh <-chan tmpubsub.Message, height int64, round int32, propID metadata.BlockID) { select { case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewProposal event") case msg := <-proposalCh: - proposalEvent, ok := msg.Data().(types.EventDataCompleteProposal) + proposalEvent, ok := msg.Data().(events.EventDataCompleteProposal) if !ok { panic(fmt.Sprintf("expected a EventDataCompleteProposal, got %T. 
Wrong subscription channel?", msg.Data())) @@ -659,7 +662,7 @@ func ensureVote(voteCh <-chan tmpubsub.Message, height int64, round int32, case <-time.After(ensureTimeout): panic("Timeout expired while waiting for NewVote event") case msg := <-voteCh: - voteEvent, ok := msg.Data().(types.EventDataVote) + voteEvent, ok := msg.Data().(events.EventDataVote) if !ok { panic(fmt.Sprintf("expected a EventDataVote, got %T. Wrong subscription channel?", msg.Data())) @@ -739,7 +742,7 @@ func randConsensusState( closeFuncs = append(closeFuncs, appCloser.Close) } - vals := types.TM2PB.ValidatorUpdates(state.Validators) + vals := consensus.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) css[i] = newStateWithConfigAndBlockStore(thisConfig, state, privVals[i], app, blockStore) @@ -765,7 +768,7 @@ func randConsensusNetWithPeers( testName string, tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application, -) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) { +) ([]*State, *consensus.GenesisDoc, *cfg.Config, cleanupFunc) { genDoc, privVals := factory.RandGenesisDoc(config, nValidators, false, testMinPower) css := make([]*State, nPeers) logger := consensusLogger() @@ -780,7 +783,7 @@ func randConsensusNetWithPeers( if i == 0 { peer0Config = thisConfig } - var privVal types.PrivValidator + var privVal consensus.PrivValidator if i < nValidators { privVal = privVals[i] } else { @@ -800,7 +803,7 @@ func randConsensusNetWithPeers( } app := appFunc(path.Join(config.DBDir(), fmt.Sprintf("%s_%d", testName, i))) - vals := types.TM2PB.ValidatorUpdates(state.Validators) + vals := consensus.TM2PB.ValidatorUpdates(state.Validators) if _, ok := app.(*kvstore.PersistentKVStoreApplication); ok { // simulate handshake, receive app version. 
If don't do this, replay test will fail state.Version.Consensus.App = kvstore.ProtocolVersion @@ -823,7 +826,7 @@ func randGenesisState( config *cfg.Config, numValidators int, randPower bool, - minPower int64) (sm.State, []types.PrivValidator) { + minPower int64) (sm.State, []consensus.PrivValidator) { genDoc, privValidators := factory.RandGenesisDoc(config, numValidators, randPower, minPower) s0, _ := sm.MakeGenesisState(genDoc) @@ -891,7 +894,7 @@ func newPersistentKVStoreWithPath(dbDir string) abci.Application { return kvstore.NewPersistentKVStoreApplication(dbDir) } -func signDataIsEqual(v1 *types.Vote, v2 *tmproto.Vote) bool { +func signDataIsEqual(v1 *consensus.Vote, v2 *tmproto.Vote) bool { if v1 == nil || v2 == nil { return false } diff --git a/internal/consensus/invalid_test.go b/internal/consensus/invalid_test.go index 6f858ee11..0f8de292d 100644 --- a/internal/consensus/invalid_test.go +++ b/internal/consensus/invalid_test.go @@ -9,9 +9,11 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func TestReactorInvalidPrecommit(t *testing.T) { @@ -59,7 +61,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { for _, sub := range rts.subs { wg.Add(1) - go func(s types.Subscription) { + go func(s events.Subscription) { <-s.Out() wg.Done() }(sub) @@ -69,7 +71,7 @@ func TestReactorInvalidPrecommit(t *testing.T) { wg.Wait() } -func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv types.PrivValidator) { +func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r *Reactor, pv consensus.PrivValidator) { // routine to: // - precommit for a random block // - send precommit to all peers @@ -86,16 +88,16 @@ func invalidDoPrevoteFunc(t *testing.T, height int64, round int32, cs *State, r // precommit a random block blockHash := bytes.HexBytes(tmrand.Bytes(32)) - precommit := &types.Vote{ + precommit := &consensus.Vote{ ValidatorAddress: addr, ValidatorIndex: valIndex, Height: cs.Height, Round: cs.Round, Timestamp: cs.voteTime(), Type: tmproto.PrecommitType, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: blockHash, - PartSetHeader: types.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}}, + PartSetHeader: metadata.PartSetHeader{Total: 1, Hash: tmrand.Bytes(32)}}, } p := precommit.ToProto() diff --git a/internal/consensus/mempool_test.go b/internal/consensus/mempool_test.go index 5edec248a..9f2492ccd 100644 --- a/internal/consensus/mempool_test.go +++ b/internal/consensus/mempool_test.go @@ -14,11 +14,12 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/code" - abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) // for testing @@ -37,7 +38,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { cs := 
newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock) startTestRound(cs, height, round) ensureNewEventOnChannel(newBlockCh) // first block gets committed @@ -60,7 +61,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { assertMempool(cs.txNotifier).EnableTxsAvailable() - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock) startTestRound(cs, cs.Height, cs.Round) ensureNewEventOnChannel(newBlockCh) // first block gets committed @@ -79,10 +80,10 @@ func TestMempoolProgressInHigherRound(t *testing.T) { cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - cs.setProposal = func(proposal *types.Proposal) error { + newBlockCh := subscribe(cs.eventBus, events.EventQueryNewBlock) + newRoundCh := subscribe(cs.eventBus, events.EventQueryNewRound) + timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose) + cs.setProposal = func(proposal *consensus.Proposal) error { if cs.Height == 2 && cs.Round == 0 { // dont set the proposal in round 0 so we timeout and // go to next round @@ -129,7 +130,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockStore) err := stateStore.Save(state) require.NoError(t, err) - newBlockHeaderCh := subscribe(cs.eventBus, types.EventQueryNewBlockHeader) + newBlockHeaderCh := subscribe(cs.eventBus, events.EventQueryNewBlockHeader) const numTxs int64 = 3000 go deliverTxsRange(cs, 0, int(numTxs)) @@ -138,7 +139,7 @@ func TestMempoolTxConcurrentWithCommit(t *testing.T) { for n := int64(0); n < numTxs; { select { case msg := <-newBlockHeaderCh: - headerEvent := msg.Data().(types.EventDataNewBlockHeader) + headerEvent := msg.Data().(events.EventDataNewBlockHeader) n += headerEvent.NumTxs case <-time.After(30 * time.Second): t.Fatal("Timed out waiting 30s to commit blocks with transactions") diff --git a/internal/consensus/metrics.go b/internal/consensus/metrics.go index bceac4942..110012880 100644 --- a/internal/consensus/metrics.go +++ b/internal/consensus/metrics.go @@ -3,7 +3,7 @@ package consensus import ( "github.com/go-kit/kit/metrics" "github.com/go-kit/kit/metrics/discard" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" prometheus "github.com/go-kit/kit/metrics/prometheus" stdprometheus "github.com/prometheus/client_golang/prometheus" @@ -221,7 +221,7 @@ func NopMetrics() *Metrics { } // RecordConsMetrics uses for recording the block related metrics during fast-sync. 
-func (m *Metrics) RecordConsMetrics(block *types.Block) { +func (m *Metrics) RecordConsMetrics(block *block.Block) { m.NumTxs.Set(float64(len(block.Data.Txs))) m.TotalTxs.Add(float64(len(block.Data.Txs))) m.BlockSizeBytes.Observe(float64(block.Size())) diff --git a/internal/consensus/msgs.go b/internal/consensus/msgs.go index 17aef9aa2..e02963f54 100644 --- a/internal/consensus/msgs.go +++ b/internal/consensus/msgs.go @@ -8,9 +8,12 @@ import ( "github.com/tendermint/tendermint/libs/bits" tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // Message defines an interface that the consensus domain types implement. When @@ -95,7 +98,7 @@ func (m *NewRoundStepMessage) String() string { type NewValidBlockMessage struct { Height int64 Round int32 - BlockPartSetHeader types.PartSetHeader + BlockPartSetHeader metadata.PartSetHeader BlockParts *bits.BitArray IsCommit bool } @@ -119,8 +122,8 @@ func (m *NewValidBlockMessage) ValidateBasic() error { m.BlockParts.Size(), m.BlockPartSetHeader.Total) } - if m.BlockParts.Size() > int(types.MaxBlockPartsCount) { - return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), types.MaxBlockPartsCount) + if m.BlockParts.Size() > int(metadata.MaxBlockPartsCount) { + return fmt.Errorf("blockParts bit array is too big: %d, max: %d", m.BlockParts.Size(), metadata.MaxBlockPartsCount) } return nil } @@ -133,7 +136,7 @@ func (m *NewValidBlockMessage) String() string { // ProposalMessage is sent when a new block is proposed. type ProposalMessage struct { - Proposal *types.Proposal + Proposal *consensus.Proposal } // ValidateBasic performs basic validation. @@ -164,8 +167,8 @@ func (m *ProposalPOLMessage) ValidateBasic() error { if m.ProposalPOL.Size() == 0 { return errors.New("empty ProposalPOL bit array") } - if m.ProposalPOL.Size() > types.MaxVotesCount { - return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), types.MaxVotesCount) + if m.ProposalPOL.Size() > consensus.MaxVotesCount { + return fmt.Errorf("proposalPOL bit array is too big: %d, max: %d", m.ProposalPOL.Size(), consensus.MaxVotesCount) } return nil } @@ -179,7 +182,7 @@ func (m *ProposalPOLMessage) String() string { type BlockPartMessage struct { Height int64 Round int32 - Part *types.Part + Part *metadata.Part } // ValidateBasic performs basic validation. @@ -203,7 +206,7 @@ func (m *BlockPartMessage) String() string { // VoteMessage is sent when voting for a proposal (or lack thereof). type VoteMessage struct { - Vote *types.Vote + Vote *consensus.Vote } // ValidateBasic performs basic validation. @@ -232,7 +235,7 @@ func (m *HasVoteMessage) ValidateBasic() error { if m.Round < 0 { return errors.New("negative Round") } - if !types.IsVoteTypeValid(m.Type) { + if !consensus.IsVoteTypeValid(m.Type) { return errors.New("invalid Type") } if m.Index < 0 { @@ -251,7 +254,7 @@ type VoteSetMaj23Message struct { Height int64 Round int32 Type tmproto.SignedMsgType - BlockID types.BlockID + BlockID metadata.BlockID } // ValidateBasic performs basic validation. 
@@ -262,7 +265,7 @@ func (m *VoteSetMaj23Message) ValidateBasic() error { if m.Round < 0 { return errors.New("negative Round") } - if !types.IsVoteTypeValid(m.Type) { + if !consensus.IsVoteTypeValid(m.Type) { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { @@ -283,7 +286,7 @@ type VoteSetBitsMessage struct { Height int64 Round int32 Type tmproto.SignedMsgType - BlockID types.BlockID + BlockID metadata.BlockID Votes *bits.BitArray } @@ -292,7 +295,7 @@ func (m *VoteSetBitsMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } - if !types.IsVoteTypeValid(m.Type) { + if !consensus.IsVoteTypeValid(m.Type) { return errors.New("invalid Type") } if err := m.BlockID.ValidateBasic(); err != nil { @@ -300,8 +303,8 @@ func (m *VoteSetBitsMessage) ValidateBasic() error { } // NOTE: Votes.Size() can be zero if the node does not have any - if m.Votes.Size() > types.MaxVotesCount { - return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), types.MaxVotesCount) + if m.Votes.Size() > consensus.MaxVotesCount { + return fmt.Errorf("votes bit array is too big: %d, max: %d", m.Votes.Size(), consensus.MaxVotesCount) } return nil @@ -465,7 +468,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { LastCommitRound: msg.NewRoundStep.LastCommitRound, } case *tmcons.Message_NewValidBlock: - pbPartSetHeader, err := types.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) + pbPartSetHeader, err := metadata.PartSetHeaderFromProto(&msg.NewValidBlock.BlockPartSetHeader) if err != nil { return nil, fmt.Errorf("parts header to proto error: %w", err) } @@ -484,7 +487,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { IsCommit: msg.NewValidBlock.IsCommit, } case *tmcons.Message_Proposal: - pbP, err := types.ProposalFromProto(&msg.Proposal.Proposal) + pbP, err := consensus.ProposalFromProto(&msg.Proposal.Proposal) if err != nil { return nil, fmt.Errorf("proposal msg to proto error: %w", err) } @@ -504,7 +507,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { ProposalPOL: pbBits, } case *tmcons.Message_BlockPart: - parts, err := types.PartFromProto(&msg.BlockPart.Part) + parts, err := metadata.PartFromProto(&msg.BlockPart.Part) if err != nil { return nil, fmt.Errorf("blockpart msg to proto error: %w", err) } @@ -514,7 +517,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { Part: parts, } case *tmcons.Message_Vote: - vote, err := types.VoteFromProto(msg.Vote.Vote) + vote, err := consensus.VoteFromProto(msg.Vote.Vote) if err != nil { return nil, fmt.Errorf("vote msg to proto error: %w", err) } @@ -530,7 +533,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { Index: msg.HasVote.Index, } case *tmcons.Message_VoteSetMaj23: - bi, err := types.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) + bi, err := metadata.BlockIDFromProto(&msg.VoteSetMaj23.BlockID) if err != nil { return nil, fmt.Errorf("voteSetMaj23 msg to proto error: %w", err) } @@ -541,7 +544,7 @@ func MsgFromProto(msg *tmcons.Message) (Message, error) { BlockID: *bi, } case *tmcons.Message_VoteSetBits: - bi, err := types.BlockIDFromProto(&msg.VoteSetBits.BlockID) + bi, err := metadata.BlockIDFromProto(&msg.VoteSetBits.BlockID) if err != nil { return nil, fmt.Errorf("block ID to proto error: %w", err) } @@ -574,7 +577,7 @@ func WALToProto(msg WALMessage) (*tmcons.WALMessage, error) { var pb tmcons.WALMessage switch msg := msg.(type) { - case types.EventDataRoundState: + case events.EventDataRoundState: pb = 
tmcons.WALMessage{ Sum: &tmcons.WALMessage_EventDataRoundState{ EventDataRoundState: &tmproto.EventDataRoundState{ @@ -637,7 +640,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) { switch msg := msg.Sum.(type) { case *tmcons.WALMessage_EventDataRoundState: - pb = types.EventDataRoundState{ + pb = events.EventDataRoundState{ Height: msg.EventDataRoundState.Height, Round: msg.EventDataRoundState.Round, Step: msg.EventDataRoundState.Step, @@ -650,7 +653,7 @@ func WALFromProto(msg *tmcons.WALMessage) (WALMessage, error) { } pb = msgInfo{ Msg: walMsg, - PeerID: types.NodeID(msg.MsgInfo.PeerID), + PeerID: p2p.NodeID(msg.MsgInfo.PeerID), } case *tmcons.WALMessage_TimeoutInfo: diff --git a/internal/consensus/msgs_test.go b/internal/consensus/msgs_test.go index c22ebf5c0..bcd275d52 100644 --- a/internal/consensus/msgs_test.go +++ b/internal/consensus/msgs_test.go @@ -18,18 +18,21 @@ import ( "github.com/tendermint/tendermint/libs/bits" "github.com/tendermint/tendermint/libs/bytes" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func TestMsgToProto(t *testing.T) { - psh := types.PartSetHeader{ + psh := metadata.PartSetHeader{ Total: 1, Hash: tmrand.Bytes(32), } pbPsh := psh.ToProto() - bi := types.BlockID{ + bi := metadata.BlockID{ Hash: tmrand.Bytes(32), PartSetHeader: psh, } @@ -37,7 +40,7 @@ func TestMsgToProto(t *testing.T) { bits := bits.NewBitArray(1) pbBits := bits.ToProto() - parts := types.Part{ + parts := metadata.Part{ Index: 1, Bytes: []byte("test"), Proof: merkle.Proof{ @@ -50,7 +53,7 @@ func TestMsgToProto(t *testing.T) { pbParts, err := parts.ToProto() require.NoError(t, err) - proposal := types.Proposal{ + proposal := consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 1, @@ -61,9 +64,9 @@ func TestMsgToProto(t *testing.T) { } pbProposal := proposal.ToProto() - pv := types.NewMockPV() + pv := consensus.NewMockPV() vote, err := factory.MakeVote(pv, factory.DefaultTestChainID, - 0, 1, 0, 2, types.BlockID{}, time.Now()) + 0, 1, 0, 2, metadata.BlockID{}, time.Now()) require.NoError(t, err) pbVote := vote.ToProto() @@ -210,7 +213,7 @@ func TestMsgToProto(t *testing.T) { func TestWALMsgProto(t *testing.T) { - parts := types.Part{ + parts := metadata.Part{ Index: 1, Bytes: []byte("test"), Proof: merkle.Proof{ @@ -229,7 +232,7 @@ func TestWALMsgProto(t *testing.T) { want *tmcons.WALMessage wantErr bool }{ - {"successful EventDataRoundState", types.EventDataRoundState{ + {"successful EventDataRoundState", events.EventDataRoundState{ Height: 2, Round: 1, Step: "ronies", @@ -248,7 +251,7 @@ func TestWALMsgProto(t *testing.T) { Round: 1, Part: &parts, }, - PeerID: types.NodeID("string"), + PeerID: p2p.NodeID("string"), }, &tmcons.WALMessage{ Sum: &tmcons.WALMessage_MsgInfo{ MsgInfo: &tmcons.MsgInfo{ @@ -316,13 +319,13 @@ func TestWALMsgProto(t *testing.T) { // nolint:lll //ignore line length for tests func TestConsMsgsVectors(t *testing.T) { date := time.Date(2018, 8, 30, 12, 0, 0, 0, time.UTC) - psh := types.PartSetHeader{ + psh := metadata.PartSetHeader{ Total: 1, Hash: []byte("add_more_exclamation_marks_code-"), } pbPsh := psh.ToProto() - bi := types.BlockID{ + bi := metadata.BlockID{ Hash: 
[]byte("add_more_exclamation_marks_code-"), PartSetHeader: psh, } @@ -330,7 +333,7 @@ func TestConsMsgsVectors(t *testing.T) { bits := bits.NewBitArray(1) pbBits := bits.ToProto() - parts := types.Part{ + parts := metadata.Part{ Index: 1, Bytes: []byte("test"), Proof: merkle.Proof{ @@ -343,7 +346,7 @@ func TestConsMsgsVectors(t *testing.T) { pbParts, err := parts.ToProto() require.NoError(t, err) - proposal := types.Proposal{ + proposal := consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 1, @@ -354,7 +357,7 @@ func TestConsMsgsVectors(t *testing.T) { } pbProposal := proposal.ToProto() - v := &types.Vote{ + v := &consensus.Vote{ ValidatorAddress: []byte("add_more_exclamation"), ValidatorIndex: 1, Height: 1, @@ -431,10 +434,10 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { invalidSignedMsgType tmproto.SignedMsgType = 0x03 ) - validBlockID := types.BlockID{} - invalidBlockID := types.BlockID{ + validBlockID := metadata.BlockID{} + invalidBlockID := metadata.BlockID{ Hash: bytes.HexBytes{}, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1, Hash: []byte{0}, }, @@ -446,7 +449,7 @@ func TestVoteSetMaj23MessageValidateBasic(t *testing.T) { messageHeight int64 testName string messageType tmproto.SignedMsgType - messageBlockID types.BlockID + messageBlockID metadata.BlockID }{ {false, 0, 0, "Valid Message", validSignedMsgType, validBlockID}, {true, -1, 0, "Invalid Message", validSignedMsgType, validBlockID}, @@ -479,15 +482,15 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { {func(msg *VoteSetBitsMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *VoteSetBitsMessage) { msg.Type = 0x03 }, "invalid Type"}, {func(msg *VoteSetBitsMessage) { - msg.BlockID = types.BlockID{ + msg.BlockID = metadata.BlockID{ Hash: bytes.HexBytes{}, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1, Hash: []byte{0}, }, } }, "wrong BlockID: wrong PartSetHeader: wrong Hash:"}, - {func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(types.MaxVotesCount + 1) }, + {func(msg *VoteSetBitsMessage) { msg.Votes = bits.NewBitArray(consensus.MaxVotesCount + 1) }, "votes bit array is too big: 10001, max: 10000"}, } @@ -499,7 +502,7 @@ func TestVoteSetBitsMessageValidateBasic(t *testing.T) { Round: 0, Type: 0x01, Votes: bits.NewBitArray(1), - BlockID: types.BlockID{}, + BlockID: metadata.BlockID{}, } tc.malleateFn(msg) @@ -604,7 +607,9 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { "empty blockParts", }, { - func(msg *NewValidBlockMessage) { msg.BlockParts = bits.NewBitArray(int(types.MaxBlockPartsCount) + 1) }, + func(msg *NewValidBlockMessage) { + msg.BlockParts = bits.NewBitArray(int(metadata.MaxBlockPartsCount) + 1) + }, "blockParts bit array size 1602 not equal to BlockPartSetHeader.Total 1", }, } @@ -615,7 +620,7 @@ func TestNewValidBlockMessageValidateBasic(t *testing.T) { msg := &NewValidBlockMessage{ Height: 1, Round: 0, - BlockPartSetHeader: types.PartSetHeader{ + BlockPartSetHeader: metadata.PartSetHeader{ Total: 1, }, BlockParts: bits.NewBitArray(1), @@ -639,7 +644,7 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { {func(msg *ProposalPOLMessage) { msg.Height = -1 }, "negative Height"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOLRound = -1 }, "negative ProposalPOLRound"}, {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(0) }, "empty ProposalPOL bit array"}, - {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(types.MaxVotesCount 
+ 1) }, + {func(msg *ProposalPOLMessage) { msg.ProposalPOL = bits.NewBitArray(consensus.MaxVotesCount + 1) }, "proposalPOL bit array is too big: 10001, max: 10000"}, } @@ -662,13 +667,13 @@ func TestProposalPOLMessageValidateBasic(t *testing.T) { } func TestBlockPartMessageValidateBasic(t *testing.T) { - testPart := new(types.Part) + testPart := new(metadata.Part) testPart.Proof.LeafHash = tmhash.Sum([]byte("leaf")) testCases := []struct { testName string messageHeight int64 messageRound int32 - messagePart *types.Part + messagePart *metadata.Part expectErr bool }{ {"Valid Message", 0, 0, testPart, false}, @@ -689,7 +694,7 @@ func TestBlockPartMessageValidateBasic(t *testing.T) { }) } - message := BlockPartMessage{Height: 0, Round: 0, Part: new(types.Part)} + message := BlockPartMessage{Height: 0, Round: 0, Part: new(metadata.Part)} message.Part.Index = 1 assert.Equal(t, true, message.ValidateBasic() != nil, "Validate Basic had an unexpected result") diff --git a/internal/consensus/peer_state.go b/internal/consensus/peer_state.go index 73e61f21c..a09804dab 100644 --- a/internal/consensus/peer_state.go +++ b/internal/consensus/peer_state.go @@ -12,8 +12,10 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) var ( @@ -36,7 +38,7 @@ func (pss peerStateStats) String() string { // NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go. // Be mindful of what you Expose. type PeerState struct { - peerID types.NodeID + peerID p2p.NodeID logger log.Logger // NOTE: Modify below using setters, never directly. @@ -50,7 +52,7 @@ type PeerState struct { } // NewPeerState returns a new PeerState for the given node ID. -func NewPeerState(logger log.Logger, peerID types.NodeID) *PeerState { +func NewPeerState(logger log.Logger, peerID p2p.NodeID) *PeerState { return &PeerState{ peerID: peerID, logger: logger, @@ -110,7 +112,7 @@ func (ps *PeerState) GetHeight() int64 { } // SetHasProposal sets the given proposal as known for the peer. -func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { +func (ps *PeerState) SetHasProposal(proposal *consensus.Proposal) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -137,7 +139,7 @@ func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { // InitProposalBlockParts initializes the peer's proposal block parts header // and bit array. -func (ps *PeerState) InitProposalBlockParts(partSetHeader types.PartSetHeader) { +func (ps *PeerState) InitProposalBlockParts(partSetHeader metadata.PartSetHeader) { ps.mtx.Lock() defer ps.mtx.Unlock() @@ -165,7 +167,7 @@ func (ps *PeerState) SetHasProposalBlockPart(height int64, round int32, index in // vote was picked. // // NOTE: `votes` must be the correct Size() for the Height(). 
-func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, bool) {
+func (ps *PeerState) PickVoteToSend(votes consensus.VoteSetReader) (*consensus.Vote, bool) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -199,8 +201,21 @@ func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (*types.Vote, boo
return nil, false
}
+func (ps *PeerState) PickVoteFromCommit(commit *metadata.Commit) (*consensus.Vote, bool) {
+ psVotes := ps.getVoteBitArray(commit.Height, commit.Round, tmproto.PrecommitType)
+ if psVotes == nil {
+ return nil, false // not something worth sending
+ }
+
+ if index, ok := commit.BitArray().Sub(psVotes).PickRandom(); ok {
+ return consensus.GetVoteFromCommit(commit, int32(index)), true
+ }
+
+ return nil, false
+}
+
func (ps *PeerState) getVoteBitArray(height int64, round int32, votesType tmproto.SignedMsgType) *bits.BitArray {
- if !types.IsVoteTypeValid(votesType) {
+ if !consensus.IsVoteTypeValid(votesType) {
return nil
}
@@ -357,7 +372,7 @@ func (ps *PeerState) BlockPartsSent() int {
}
// SetHasVote sets the given vote as known by the peer
-func (ps *PeerState) SetHasVote(vote *types.Vote) {
+func (ps *PeerState) SetHasVote(vote *consensus.Vote) {
ps.mtx.Lock()
defer ps.mtx.Unlock()
@@ -404,7 +419,7 @@ func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) {
if psHeight != msg.Height || psRound != msg.Round {
ps.PRS.Proposal = false
- ps.PRS.ProposalBlockPartSetHeader = types.PartSetHeader{}
+ ps.PRS.ProposalBlockPartSetHeader = metadata.PartSetHeader{}
ps.PRS.ProposalBlockParts = nil
ps.PRS.ProposalPOLRound = -1
ps.PRS.ProposalPOL = nil
diff --git a/internal/consensus/reactor.go b/internal/consensus/reactor.go
index 2b9fa7358..a7eb50b7f 100644
--- a/internal/consensus/reactor.go
+++ b/internal/consensus/reactor.go
@@ -12,10 +12,13 @@ import (
tmevents "github.com/tendermint/tendermint/libs/events"
"github.com/tendermint/tendermint/libs/log"
"github.com/tendermint/tendermint/libs/service"
+ "github.com/tendermint/tendermint/pkg/consensus"
+ "github.com/tendermint/tendermint/pkg/events"
+ "github.com/tendermint/tendermint/pkg/metadata"
+ p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
sm "github.com/tendermint/tendermint/state"
- "github.com/tendermint/tendermint/types"
)
var (
@@ -125,11 +128,11 @@ type Reactor struct {
service.BaseService
state *State
- eventBus *types.EventBus
+ eventBus *events.EventBus
Metrics *Metrics
mtx tmsync.RWMutex
- peers map[types.NodeID]*PeerState
+ peers map[p2ptypes.NodeID]*PeerState
waitSync bool
stateCh *p2p.Channel
@@ -165,7 +168,7 @@ func NewReactor(
r := &Reactor{
state: cs,
waitSync: waitSync,
- peers: make(map[types.NodeID]*PeerState),
+ peers: make(map[p2ptypes.NodeID]*PeerState),
Metrics: NopMetrics(),
stateCh: stateCh,
dataCh: dataCh,
@@ -260,7 +263,7 @@ func (r *Reactor) OnStop() {
}
// SetEventBus sets the reactor's event bus.
-func (r *Reactor) SetEventBus(b *types.EventBus) {
+func (r *Reactor) SetEventBus(b *events.EventBus) {
r.eventBus = b
r.state.SetEventBus(b)
}
@@ -313,7 +316,7 @@ conR: %+v`, err, r.state, r))
}
- d := types.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
+ d := events.EventDataBlockSyncStatus{Complete: true, Height: state.LastBlockHeight}
if err := r.eventBus.PublishEventBlockSyncStatus(d); err != nil {
r.Logger.Error("failed to emit the blocksync complete event", "err", err)
}
@@ -346,7 +349,7 @@ func (r *Reactor) StringIndented(indent string) string {
}
// GetPeerState returns PeerState for a given NodeID.
-func (r *Reactor) GetPeerState(peerID types.NodeID) (*PeerState, bool) {
+func (r *Reactor) GetPeerState(peerID p2ptypes.NodeID) (*PeerState, bool) {
r.mtx.RLock()
defer r.mtx.RUnlock()
@@ -375,7 +378,7 @@ func (r *Reactor) broadcastNewValidBlockMessage(rs *cstypes.RoundState) {
}
}
-func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
+func (r *Reactor) broadcastHasVoteMessage(vote *consensus.Vote) {
r.stateCh.Out <- p2p.Envelope{
Broadcast: true,
Message: &tmcons.HasVote{
@@ -393,7 +396,7 @@ func (r *Reactor) broadcastHasVoteMessage(vote *types.Vote) {
func (r *Reactor) subscribeToBroadcastEvents() {
err := r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
- types.EventNewRoundStepValue,
+ events.EventNewRoundStepValue,
func(data tmevents.EventData) {
r.broadcastNewRoundStepMessage(data.(*cstypes.RoundState))
select {
@@ -408,7 +411,7 @@ func (r *Reactor) subscribeToBroadcastEvents() {
err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
- types.EventValidBlockValue,
+ events.EventValidBlockValue,
func(data tmevents.EventData) {
r.broadcastNewValidBlockMessage(data.(*cstypes.RoundState))
},
@@ -419,9 +422,9 @@ func (r *Reactor) subscribeToBroadcastEvents() {
err = r.state.evsw.AddListenerForEvent(
listenerIDConsensus,
- types.EventVoteValue,
+ events.EventVoteValue,
func(data tmevents.EventData) {
- r.broadcastHasVoteMessage(data.(*types.Vote))
+ r.broadcastHasVoteMessage(data.(*consensus.Vote))
},
)
if err != nil {
@@ -443,7 +446,7 @@ func makeRoundStepMessage(rs *cstypes.RoundState) *tmcons.NewRoundStep {
}
}
-func (r *Reactor) sendNewRoundStepMessage(peerID types.NodeID) {
+func (r *Reactor) sendNewRoundStepMessage(peerID p2ptypes.NodeID) {
rs := r.state.GetRoundState()
msg := makeRoundStepMessage(rs)
r.stateCh.Out <- p2p.Envelope{
@@ -653,7 +656,7 @@ OUTER_LOOP:
// pickSendVote picks a vote and sends it to the peer. It will return true if
// there is a vote to send and false otherwise.
-func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool {
+func (r *Reactor) pickSendVote(ps *PeerState, votes consensus.VoteSetReader) bool {
if vote, ok := ps.PickVoteToSend(votes); ok {
r.Logger.Debug("sending vote message", "ps", ps, "vote", vote)
r.voteCh.Out <- p2p.Envelope{
@@ -670,6 +673,23 @@ func (r *Reactor) pickSendVote(ps *PeerState, votes types.VoteSetReader) bool {
return false
}
+func (r *Reactor) sendVoteFromCommit(ps *PeerState, commit *metadata.Commit) bool {
+ if vote, ok := ps.PickVoteFromCommit(commit); ok {
+ r.Logger.Debug("sending vote message", "ps", ps, "vote", vote)
+ r.voteCh.Out <- p2p.Envelope{
+ To: ps.peerID,
+ Message: &tmcons.Vote{
+ Vote: vote.ToProto(),
+ },
+ }
+
+ ps.SetHasVote(vote)
+ return true
+ }
+
+ return false
+}
+
func (r *Reactor) gossipVotesForHeight(rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool {
logger := r.Logger.With("height", prs.Height).With("peer", ps.peerID)
@@ -781,8 +801,10 @@ OUTER_LOOP:
if blockStoreBase > 0 && prs.Height != 0 && rs.Height >= prs.Height+2 && prs.Height >= blockStoreBase {
// Load the block commit for prs.Height, which contains precommit
// signatures for prs.Height.
+ // FIXME: It's incredibly inefficient to be sending individual votes to a node that is lagging behind.
+ // We should instead be gossiping entire commits
if commit := r.state.blockStore.LoadBlockCommit(prs.Height); commit != nil {
- if r.pickSendVote(ps, commit) {
+ if r.sendVoteFromCommit(ps, commit) {
logger.Debug("picked Catchup commit to send", "height", prs.Height)
continue OUTER_LOOP
}
diff --git a/internal/consensus/reactor_test.go b/internal/consensus/reactor_test.go
index 8c70ca1d5..486b43f32 100644
--- a/internal/consensus/reactor_test.go
+++ b/internal/consensus/reactor_test.go
@@ -15,7 +15,6 @@ import (
abcicli "github.com/tendermint/tendermint/abci/client"
"github.com/tendermint/tendermint/abci/example/kvstore"
- abci "github.com/tendermint/tendermint/abci/types"
cfg "github.com/tendermint/tendermint/config"
cryptoenc "github.com/tendermint/tendermint/crypto/encoding"
tmsync "github.com/tendermint/tendermint/internal/libs/sync"
@@ -26,11 +25,16 @@ import (
"github.com/tendermint/tendermint/internal/test/factory"
"github.com/tendermint/tendermint/libs/log"
tmpubsub "github.com/tendermint/tendermint/libs/pubsub"
+ "github.com/tendermint/tendermint/pkg/abci"
+ "github.com/tendermint/tendermint/pkg/block"
+ "github.com/tendermint/tendermint/pkg/consensus"
+ "github.com/tendermint/tendermint/pkg/events"
+ "github.com/tendermint/tendermint/pkg/evidence"
+ p2ptypes "github.com/tendermint/tendermint/pkg/p2p"
tmcons "github.com/tendermint/tendermint/proto/tendermint/consensus"
sm "github.com/tendermint/tendermint/state"
statemocks "github.com/tendermint/tendermint/state/mocks"
"github.com/tendermint/tendermint/store"
- "github.com/tendermint/tendermint/types"
dbm "github.com/tendermint/tm-db"
)
@@ -40,14 +44,14 @@ var (
type reactorTestSuite struct {
network *p2ptest.Network
- states map[types.NodeID]*State
- reactors map[types.NodeID]*Reactor
- subs map[types.NodeID]types.Subscription
- blocksyncSubs map[types.NodeID]types.Subscription
- stateChannels map[types.NodeID]*p2p.Channel
- dataChannels map[types.NodeID]*p2p.Channel
- voteChannels map[types.NodeID]*p2p.Channel
- voteSetBitsChannels map[types.NodeID]*p2p.Channel
+ states map[p2ptypes.NodeID]*State
+ reactors map[p2ptypes.NodeID]*Reactor
+ subs map[p2ptypes.NodeID]events.Subscription
+ blocksyncSubs 
map[p2ptypes.NodeID]events.Subscription + stateChannels map[p2ptypes.NodeID]*p2p.Channel + dataChannels map[p2ptypes.NodeID]*p2p.Channel + voteChannels map[p2ptypes.NodeID]*p2p.Channel + voteSetBitsChannels map[p2ptypes.NodeID]*p2p.Channel } func chDesc(chID p2p.ChannelID) p2p.ChannelDescriptor { @@ -61,10 +65,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu rts := &reactorTestSuite{ network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - states: make(map[types.NodeID]*State), - reactors: make(map[types.NodeID]*Reactor, numNodes), - subs: make(map[types.NodeID]types.Subscription, numNodes), - blocksyncSubs: make(map[types.NodeID]types.Subscription, numNodes), + states: make(map[p2ptypes.NodeID]*State), + reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes), + subs: make(map[p2ptypes.NodeID]events.Subscription, numNodes), + blocksyncSubs: make(map[p2ptypes.NodeID]events.Subscription, numNodes), } rts.stateChannels = rts.network.MakeChannelsNoCleanup(t, chDesc(StateChannel), new(tmcons.Message), size) @@ -91,10 +95,10 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu reactor.SetEventBus(state.eventBus) - blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, size) + blocksSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryNewBlock, size) require.NoError(t, err) - fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryBlockSyncStatus, size) + fsSub, err := state.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryBlockSyncStatus, size) require.NoError(t, err) rts.states[nodeID] = state @@ -132,7 +136,7 @@ func setup(t *testing.T, numNodes int, states []*State, size int) *reactorTestSu return rts } -func validateBlock(block *types.Block, activeVals map[string]struct{}) error { +func validateBlock(block *block.Block, activeVals map[string]struct{}) error { if block.LastCommit.Size() != len(activeVals) { return fmt.Errorf( "commit size doesn't match number of active validators. 
Got %d, expected %d", @@ -153,14 +157,14 @@ func waitForAndValidateBlock( t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []events.Subscription, states []*State, txs ...[]byte, ) { fn := func(j int) { msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + newBlock := msg.Data().(events.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) @@ -186,7 +190,7 @@ func waitForAndValidateBlockWithTx( t *testing.T, n int, activeVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []events.Subscription, states []*State, txs ...[]byte, ) { @@ -196,7 +200,7 @@ func waitForAndValidateBlockWithTx( BLOCK_TX_LOOP: for { msg := <-blocksSubs[j].Out() - newBlock := msg.Data().(types.EventDataNewBlock).Block + newBlock := msg.Data().(events.EventDataNewBlock).Block require.NoError(t, validateBlock(newBlock, activeVals)) @@ -231,17 +235,17 @@ func waitForBlockWithUpdatedValsAndValidateIt( t *testing.T, n int, updatedVals map[string]struct{}, - blocksSubs []types.Subscription, + blocksSubs []events.Subscription, css []*State, ) { fn := func(j int) { - var newBlock *types.Block + var newBlock *block.Block LOOP: for { msg := <-blocksSubs[j].Out() - newBlock = msg.Data().(types.EventDataNewBlock).Block + newBlock = msg.Data().(events.EventDataNewBlock).Block if newBlock.LastCommit.Size() == len(updatedVals) { break LOOP } @@ -265,7 +269,7 @@ func waitForBlockWithUpdatedValsAndValidateIt( func ensureBlockSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) { t.Helper() - status, ok := msg.Data().(types.EventDataBlockSyncStatus) + status, ok := msg.Data().(events.EventDataBlockSyncStatus) require.True(t, ok) require.Equal(t, complete, status.Complete) @@ -293,7 +297,7 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s events.Subscription) { defer wg.Done() <-s.Out() }(sub) @@ -305,7 +309,7 @@ func TestReactorBasic(t *testing.T) { wg.Add(1) // wait till everyone makes the consensus switch - go func(s types.Subscription) { + go func(s events.Subscription) { defer wg.Done() msg := <-s.Out() ensureBlockSyncStatus(t, msg, true, 0) @@ -338,7 +342,7 @@ func TestReactorWithEvidence(t *testing.T) { ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal app := appFunc() - vals := types.TM2PB.ValidatorUpdates(state.Validators) + vals := consensus.TM2PB.ValidatorUpdates(state.Validators) app.InitChain(abci.RequestInitChain{Validators: vals}) pv := privVals[i] @@ -360,12 +364,12 @@ func TestReactorWithEvidence(t *testing.T) { // everyone includes evidence of another double signing vIdx := (i + 1) % n - ev := types.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID()) + ev := evidence.NewMockDuplicateVoteEvidenceWithValidator(1, defaultTestTime, privVals[vIdx], config.ChainID()) evpool := &statemocks.EvidencePool{} - evpool.On("CheckEvidence", mock.AnythingOfType("types.EvidenceList")).Return(nil) - evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]types.Evidence{ + evpool.On("CheckEvidence", mock.AnythingOfType("p2ptypes.EvidenceList")).Return(nil) + evpool.On("PendingEvidence", mock.AnythingOfType("int64")).Return([]evidence.Evidence{ ev}, int64(len(ev.Bytes()))) - evpool.On("Update", mock.AnythingOfType("state.State"), mock.AnythingOfType("types.EvidenceList")).Return() + evpool.On("Update", 
mock.AnythingOfType("state.State"), mock.AnythingOfType("p2ptypes.EvidenceList")).Return() evpool2 := sm.EmptyEvidencePool{} @@ -374,7 +378,7 @@ func TestReactorWithEvidence(t *testing.T) { cs.SetLogger(log.TestingLogger().With("module", "consensus")) cs.SetPrivValidator(pv) - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() eventBus.SetLogger(log.TestingLogger().With("module", "events")) err = eventBus.Start() require.NoError(t, err) @@ -399,9 +403,9 @@ func TestReactorWithEvidence(t *testing.T) { // We expect for each validator that is the proposer to propose one piece of // evidence. - go func(s types.Subscription) { + go func(s events.Subscription) { msg := <-s.Out() - block := msg.Data().(types.EventDataNewBlock).Block + block := msg.Data().(events.EventDataNewBlock).Block require.Len(t, block.Evidence.Evidence, 1) wg.Done() @@ -452,7 +456,7 @@ func TestReactorCreatesBlockWhenEmptyBlocksFalse(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s events.Subscription) { <-s.Out() wg.Done() }(sub) @@ -482,7 +486,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s events.Subscription) { <-s.Out() wg.Done() }(sub) @@ -557,7 +561,7 @@ func TestReactorVotingPowerChange(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s events.Subscription) { <-s.Out() wg.Done() }(sub) @@ -565,7 +569,7 @@ func TestReactorVotingPowerChange(t *testing.T) { wg.Wait() - blocksSubs := []types.Subscription{} + blocksSubs := []events.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } @@ -657,7 +661,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { wg.Add(1) // wait till everyone makes the first new block - go func(s types.Subscription) { + go func(s events.Subscription) { <-s.Out() wg.Done() }(sub) @@ -673,7 +677,7 @@ func TestReactorValidatorSetChanges(t *testing.T) { newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - blocksSubs := []types.Subscription{} + blocksSubs := []events.Subscription{} for _, sub := range rts.subs { blocksSubs = append(blocksSubs, sub) } diff --git a/internal/consensus/replay.go b/internal/consensus/replay.go index 9b22f4631..35852f456 100644 --- a/internal/consensus/replay.go +++ b/internal/consensus/replay.go @@ -9,12 +9,14 @@ import ( "reflect" "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) var crc32c = crc32.MakeTable(crc32.Castagnoli) @@ -36,7 +38,7 @@ var crc32c = crc32.MakeTable(crc32.Castagnoli) // Unmarshal and apply a single message to the consensus state as if it were // received in receiveRoutine. Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running. 
-func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscription) error { +func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub events.Subscription) error { // Skip meta messages which exist for demarcating boundaries. if _, ok := msg.Msg.(EndHeightMessage); ok { return nil @@ -44,14 +46,14 @@ func (cs *State) readReplayMessage(msg *TimedWALMessage, newStepSub types.Subscr // for logging switch m := msg.Msg.(type) { - case types.EventDataRoundState: + case events.EventDataRoundState: cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) // these are playback checks ticker := time.After(time.Second * 2) if newStepSub != nil { select { case stepMsg := <-newStepSub.Out(): - m2 := stepMsg.Data().(types.EventDataRoundState) + m2 := stepMsg.Data().(events.EventDataRoundState) if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { return fmt.Errorf("roundState mismatch. Got %v; Expected %v", m2, m) } @@ -202,21 +204,21 @@ type Handshaker struct { stateStore sm.Store initialState sm.State store sm.BlockStore - eventBus types.BlockEventPublisher - genDoc *types.GenesisDoc + eventBus events.BlockEventPublisher + genDoc *consensus.GenesisDoc logger log.Logger nBlocks int // number of blocks applied to the state } func NewHandshaker(stateStore sm.Store, state sm.State, - store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker { + store sm.BlockStore, genDoc *consensus.GenesisDoc) *Handshaker { return &Handshaker{ stateStore: stateStore, initialState: state, store: store, - eventBus: types.NopEventBus{}, + eventBus: events.NopEventBus{}, genDoc: genDoc, logger: log.NewNopLogger(), nBlocks: 0, @@ -229,7 +231,7 @@ func (h *Handshaker) SetLogger(l log.Logger) { // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. -func (h *Handshaker) SetEventBus(eventBus types.BlockEventPublisher) { +func (h *Handshaker) SetEventBus(eventBus events.BlockEventPublisher) { h.eventBus = eventBus } @@ -302,12 +304,12 @@ func (h *Handshaker) ReplayBlocks( // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain. if appBlockHeight == 0 { - validators := make([]*types.Validator, len(h.genDoc.Validators)) + validators := make([]*consensus.Validator, len(h.genDoc.Validators)) for i, val := range h.genDoc.Validators { - validators[i] = types.NewValidator(val.PubKey, val.Power) + validators[i] = consensus.NewValidator(val.PubKey, val.Power) } - validatorSet := types.NewValidatorSet(validators) - nextVals := types.TM2PB.ValidatorUpdates(validatorSet) + validatorSet := consensus.NewValidatorSet(validators) + nextVals := consensus.TM2PB.ValidatorUpdates(validatorSet) pbParams := h.genDoc.ConsensusParams.ToProto() req := abci.RequestInitChain{ Time: h.genDoc.GenesisTime, @@ -333,12 +335,12 @@ func (h *Handshaker) ReplayBlocks( } // If the app returned validators or consensus params, update the state. 
if len(res.Validators) > 0 { - vals, err := types.PB2TM.ValidatorUpdates(res.Validators) + vals, err := consensus.PB2TM.ValidatorUpdates(res.Validators) if err != nil { return nil, err } - state.Validators = types.NewValidatorSet(vals) - state.NextValidators = types.NewValidatorSet(vals).CopyIncrementProposerPriority(1) + state.Validators = consensus.NewValidatorSet(vals) + state.NextValidators = consensus.NewValidatorSet(vals).CopyIncrementProposerPriority(1) } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") @@ -525,13 +527,13 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap return state, nil } -func assertAppHashEqualsOneFromBlock(appHash []byte, block *types.Block) { - if !bytes.Equal(appHash, block.AppHash) { +func assertAppHashEqualsOneFromBlock(appHash []byte, b *block.Block) { + if !bytes.Equal(appHash, b.AppHash) { panic(fmt.Sprintf(`block.AppHash does not match AppHash after replay. Got %X, expected %X. Block: %v `, - appHash, block.AppHash, block)) + appHash, b.AppHash, b)) } } diff --git a/internal/consensus/replay_file.go b/internal/consensus/replay_file.go index 51cb090d7..068302d57 100644 --- a/internal/consensus/replay_file.go +++ b/internal/consensus/replay_file.go @@ -16,10 +16,10 @@ import ( "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) const ( @@ -54,12 +54,12 @@ func (cs *State) ReplayFile(file string, console bool) error { // ensure all new step events are regenerated as expected ctx := context.Background() - newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := cs.eventBus.Subscribe(ctx, subscriber, events.EventQueryNewRoundStep) if err != nil { - return fmt.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) + return fmt.Errorf("failed to subscribe %s to %v", subscriber, events.EventQueryNewRoundStep) } defer func() { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} + args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: events.EventQueryNewRoundStep} if err := cs.eventBus.Unsubscribe(ctx, args); err != nil { cs.Logger.Error("Error unsubscribing to event bus", "err", err) } @@ -125,7 +125,7 @@ func newPlayback(fileName string, fp *os.File, cs *State, genState sm.State) *pl } // go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepSub types.Subscription) error { +func (pb *playback) replayReset(count int, newStepSub events.Subscription) error { if err := pb.cs.Stop(); err != nil { return err } @@ -222,12 +222,12 @@ func (pb *playback) replayConsoleLoop() int { ctx := context.Background() // ensure all new step events are regenerated as expected - newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep) + newStepSub, err := pb.cs.eventBus.Subscribe(ctx, subscriber, events.EventQueryNewRoundStep) if err != nil { - tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) + 
tmos.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, events.EventQueryNewRoundStep)) } defer func() { - args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: types.EventQueryNewRoundStep} + args := tmpubsub.UnsubscribeArgs{Subscriber: subscriber, Query: events.EventQueryNewRoundStep} if err := pb.cs.eventBus.Unsubscribe(ctx, args); err != nil { pb.cs.Logger.Error("Error unsubscribing from eventBus", "err", err) } @@ -318,7 +318,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo tmos.Exit(fmt.Sprintf("Error starting proxy app conns: %v", err)) } - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() if err := eventBus.Start(); err != nil { tmos.Exit(fmt.Sprintf("Failed to start event bus: %v", err)) } diff --git a/internal/consensus/replay_stubs.go b/internal/consensus/replay_stubs.go index c79340a0c..e9df0cbf7 100644 --- a/internal/consensus/replay_stubs.go +++ b/internal/consensus/replay_stubs.go @@ -3,12 +3,12 @@ package consensus import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" mempl "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/mempool" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- @@ -20,14 +20,14 @@ var _ mempl.Mempool = emptyMempool{} func (emptyMempool) Lock() {} func (emptyMempool) Unlock() {} func (emptyMempool) Size() int { return 0 } -func (emptyMempool) CheckTx(_ context.Context, _ types.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { +func (emptyMempool) CheckTx(_ context.Context, _ mempool.Tx, _ func(*abci.Response), _ mempl.TxInfo) error { return nil } -func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) types.Txs { return types.Txs{} } -func (emptyMempool) ReapMaxTxs(n int) types.Txs { return types.Txs{} } +func (emptyMempool) ReapMaxBytesMaxGas(_, _ int64) mempool.Txs { return mempool.Txs{} } +func (emptyMempool) ReapMaxTxs(n int) mempool.Txs { return mempool.Txs{} } func (emptyMempool) Update( _ int64, - _ types.Txs, + _ mempool.Txs, _ []*abci.ResponseDeliverTx, _ mempl.PreCheckFunc, _ mempl.PostCheckFunc, diff --git a/internal/consensus/replay_test.go b/internal/consensus/replay_test.go index 4d1c9c6b2..031f4bd41 100644 --- a/internal/consensus/replay_test.go +++ b/internal/consensus/replay_test.go @@ -20,7 +20,6 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" @@ -28,6 +27,11 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/privval" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" @@ -35,7 +39,6 @@ import ( sm 
"github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) // These tests ensure we can always recover from failure at any part of the consensus process. @@ -84,7 +87,7 @@ func startNewStateAndWaitForBlock(t *testing.T, consensusReplayConfig *cfg.Confi // in the WAL itself. Assuming the consensus state is running, replay of any // WAL, including the empty one, should eventually be followed by a new // block, or else something is wrong. - newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock) + newBlockSub, err := cs.eventBus.Subscribe(context.Background(), testSubscriber, events.EventQueryNewBlock) require.NoError(t, err) select { case <-newBlockSub.Out(): @@ -286,8 +289,8 @@ func (w *crashingWAL) Wait() { w.next.Wait() } type simulatorTestSuite struct { GenesisState sm.State Config *cfg.Config - Chain []*types.Block - Commits []*types.Commit + Chain []*block.Block + Commits []*metadata.Commit CleanupFunc cleanupFunc Mempool mempl.Mempool @@ -331,10 +334,10 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { sim.GenesisState, _ = sm.MakeGenesisState(genDoc) sim.CleanupFunc = cleanup - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - newRoundCh := subscribe(css[0].eventBus, types.EventQueryNewRound) - proposalCh := subscribe(css[0].eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(css[0].eventBus, events.EventQueryNewRound) + proposalCh := subscribe(css[0].eventBus, events.EventQueryCompleteProposal) vss := make([]*validatorStub, nPeers) for i := 0; i < nPeers; i++ { @@ -367,9 +370,9 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { assert.Nil(t, err) propBlock, _ := css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vss[1].Height, round, -1, blockID) + proposal := consensus.NewProposal(vss[1].Height, round, -1, blockID) p := proposal.ToProto() if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -399,9 +402,9 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal = types.NewProposal(vss[2].Height, round, -1, blockID) + proposal = consensus.NewProposal(vss[2].Height, round, -1, blockID) p = proposal.ToProto() if err := vss[2].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -438,7 +441,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss := make([]*validatorStub, 
nVals+1) copy(newVss, vss[:nVals+1]) sort.Sort(ValidatorStubsByPower(newVss)) @@ -460,7 +463,7 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { selfIndex := valIndexFn(0) - proposal = types.NewProposal(vss[3].Height, round, -1, blockID) + proposal = consensus.NewProposal(vss[3].Height, round, -1, blockID) p = proposal.ToProto() if err := vss[3].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -517,13 +520,13 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { assert.Nil(t, err) propBlock, _ = css[0].createProposalBlock() // changeProposer(t, cs1, vs2) propBlockParts = propBlock.MakePartSet(partSize) - blockID = types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + blockID = metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} newVss = make([]*validatorStub, nVals+3) copy(newVss, vss[:nVals+3]) sort.Sort(ValidatorStubsByPower(newVss)) selfIndex = valIndexFn(0) - proposal = types.NewProposal(vss[1].Height, round, -1, blockID) + proposal = consensus.NewProposal(vss[1].Height, round, -1, blockID) p = proposal.ToProto() if err := vss[1].SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -546,8 +549,8 @@ func setupSimulator(t *testing.T) *simulatorTestSuite { } ensureNewRound(newRoundCh, height+1, 0) - sim.Chain = make([]*types.Block, 0) - sim.Commits = make([]*types.Commit, 0) + sim.Chain = make([]*block.Block, 0) + sim.Commits = make([]*metadata.Commit, 0) for i := 1; i <= numBlocks; i++ { sim.Chain = append(sim.Chain, css[0].blockStore.LoadBlock(int64(i))) sim.Commits = append(sim.Commits, css[0].blockStore.LoadBlockCommit(int64(i))) @@ -680,8 +683,8 @@ func tempWALWithData(data []byte) string { // Make some blocks. Start a fresh app and apply nBlocks blocks. // Then restart the app and sync it up with the remaining blocks func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mode uint, testValidatorsChange bool) { - var chain []*types.Block - var commits []*types.Commit + var chain []*block.Block + var commits []*metadata.Commit var store *mockBlockStore var stateDB dbm.DB var genesisState sm.State @@ -695,7 +698,7 @@ func testHandshakeReplay(t *testing.T, sim *simulatorTestSuite, nBlocks int, mod genesisState = sim.GenesisState config = sim.Config - chain = append([]*types.Block{}, sim.Chain...) // copy chain + chain = append([]*block.Block{}, sim.Chain...) 
// copy chain commits = sim.Commits store = newMockBlockStore(config, genesisState.ConsensusParams) } else { // test single node @@ -813,13 +816,13 @@ func applyBlock(stateStore sm.Store, mempool mempl.Mempool, evpool sm.EvidencePool, st sm.State, - blk *types.Block, + blk *block.Block, proxyApp proxy.AppConns, blockStore *mockBlockStore) sm.State { - testPartSize := types.BlockPartSizeBytes + testPartSize := metadata.BlockPartSizeBytes blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool, blockStore) - blkID := types.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} + blkID := metadata.BlockID{Hash: blk.Hash(), PartSetHeader: blk.MakePartSet(testPartSize).Header()} newState, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) @@ -833,7 +836,7 @@ func buildAppStateFromChain( mempool mempl.Mempool, evpool sm.EvidencePool, state sm.State, - chain []*types.Block, + chain []*block.Block, nBlocks int, mode uint, blockStore *mockBlockStore) { @@ -844,7 +847,7 @@ func buildAppStateFromChain( defer proxyApp.Stop() //nolint:errcheck // ignore state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version - validators := types.TM2PB.ValidatorUpdates(state.Validators) + validators := consensus.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -882,7 +885,7 @@ func buildTMStateFromChain( evpool sm.EvidencePool, stateStore sm.Store, state sm.State, - chain []*types.Block, + chain []*block.Block, nBlocks int, mode uint, blockStore *mockBlockStore) sm.State { @@ -899,7 +902,7 @@ func buildTMStateFromChain( defer proxyApp.Stop() //nolint:errcheck state.Version.Consensus.App = kvstore.ProtocolVersion // simulate handshake, receive app version - validators := types.TM2PB.ValidatorUpdates(state.Validators) + validators := consensus.TM2PB.ValidatorUpdates(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(context.Background(), abci.RequestInitChain{ Validators: validators, }); err != nil { @@ -1026,7 +1029,7 @@ func (app *badApp) Commit() abci.ResponseCommit { //-------------------------- // utils for making blocks -func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { +func makeBlockchainFromWAL(wal WAL) ([]*block.Block, []*metadata.Commit, error) { var height int64 // Search for height marker @@ -1042,10 +1045,10 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { // log.Notice("Build a blockchain by reading from the WAL") var ( - blocks []*types.Block - commits []*types.Commit - thisBlockParts *types.PartSet - thisBlockCommit *types.Commit + blocks []*block.Block + commits []*metadata.Commit + thisBlockParts *metadata.PartSet + thisBlockCommit *metadata.Commit ) dec := NewWALDecoder(gr) @@ -1075,7 +1078,7 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { if err != nil { panic(err) } - block, err := types.BlockFromProto(pbb) + block, err := block.BlockFromProto(pbb) if err != nil { panic(err) } @@ -1091,17 +1094,17 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { commits = append(commits, thisBlockCommit) height++ } - case *types.PartSetHeader: - thisBlockParts = types.NewPartSetFromHeader(*p) - case *types.Part: + case *metadata.PartSetHeader: + thisBlockParts = metadata.NewPartSetFromHeader(*p) + case 
*metadata.Part: _, err := thisBlockParts.AddPart(p) if err != nil { return nil, nil, err } - case *types.Vote: + case *consensus.Vote: if p.Type == tmproto.PrecommitType { - thisBlockCommit = types.NewCommit(p.Height, p.Round, - p.BlockID, []types.CommitSig{p.CommitSig()}) + thisBlockCommit = metadata.NewCommit(p.Height, p.Round, + p.BlockID, []metadata.CommitSig{p.CommitSig()}) } } } @@ -1115,18 +1118,18 @@ func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { if err != nil { panic(err) } - block, err := types.BlockFromProto(pbb) + b, err := block.BlockFromProto(pbb) if err != nil { panic(err) } - if block.Height != height+1 { - panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", block.Height, height+1)) + if b.Height != height+1 { + panic(fmt.Sprintf("read bad block from wal. got height %d, expected %d", b.Height, height+1)) } commitHeight := thisBlockCommit.Height if commitHeight != height+1 { panic(fmt.Sprintf("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) } - blocks = append(blocks, block) + blocks = append(blocks, b) commits = append(commits, thisBlockCommit) return blocks, commits, nil } @@ -1171,39 +1174,39 @@ func stateAndStore( type mockBlockStore struct { config *cfg.Config - params types.ConsensusParams - chain []*types.Block - commits []*types.Commit + params consensus.ConsensusParams + chain []*block.Block + commits []*metadata.Commit base int64 } // TODO: NewBlockStore(db.NewMemDB) ... -func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { +func newMockBlockStore(config *cfg.Config, params consensus.ConsensusParams) *mockBlockStore { return &mockBlockStore{config, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } func (bs *mockBlockStore) Base() int64 { return bs.base } func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } -func (bs *mockBlockStore) LoadBaseMeta() *types.BlockMeta { return bs.LoadBlockMeta(bs.base) } -func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (bs *mockBlockStore) LoadBaseMeta() *block.BlockMeta { return bs.LoadBlockMeta(bs.base) } +func (bs *mockBlockStore) LoadBlock(height int64) *block.Block { return bs.chain[height-1] } +func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *block.Block { return bs.chain[int64(len(bs.chain))-1] } -func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { - block := bs.chain[height-1] - return &types.BlockMeta{ - BlockID: types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()}, - Header: block.Header, +func (bs *mockBlockStore) LoadBlockMeta(height int64) *block.BlockMeta { + b := bs.chain[height-1] + return &block.BlockMeta{ + BlockID: metadata.BlockID{Hash: b.Hash(), PartSetHeader: b.MakePartSet(metadata.BlockPartSizeBytes).Header()}, + Header: b.Header, } } -func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *metadata.Part { return nil } +func (bs *mockBlockStore) SaveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { } -func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { +func (bs 
*mockBlockStore) LoadBlockCommit(height int64) *metadata.Commit { return bs.commits[height-1] } -func (bs *mockBlockStore) LoadSeenCommit() *types.Commit { +func (bs *mockBlockStore) LoadSeenCommit() *metadata.Commit { return bs.commits[len(bs.commits)-1] } @@ -1223,8 +1226,8 @@ func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { func TestHandshakeUpdatesValidators(t *testing.T) { val, _ := factory.RandValidator(true, 10) - vals := types.NewValidatorSet([]*types.Validator{val}) - app := &initChainApp{vals: types.TM2PB.ValidatorUpdates(vals)} + vals := consensus.NewValidatorSet([]*consensus.Validator{val}) + app := &initChainApp{vals: consensus.TM2PB.ValidatorUpdates(vals)} clientCreator := proxy.NewLocalClientCreator(app) config := ResetConfig("handshake_test_") diff --git a/internal/consensus/state.go b/internal/consensus/state.go index 4da989b40..3873b6982 100644 --- a/internal/consensus/state.go +++ b/internal/consensus/state.go @@ -24,11 +24,16 @@ import ( tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/libs/service" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/block" + types "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) // Consensus sentinel errors @@ -45,8 +50,8 @@ var msgQueueSize = 1000 // msgs from the reactor which may update the state type msgInfo struct { - Msg Message `json:"msg"` - PeerID types.NodeID `json:"peer_key"` + Msg Message `json:"msg"` + PeerID p2ptypes.NodeID `json:"peer_key"` } // internally generated messages which may update the state @@ -117,7 +122,7 @@ type State struct { // we use eventBus to trigger msg broadcasts in the reactor, // and to notify external subscribers, eg. through a websocket - eventBus *types.EventBus + eventBus *events.EventBus // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes @@ -207,7 +212,7 @@ func (cs *State) SetLogger(l log.Logger) { } // SetEventBus sets event bus. -func (cs *State) SetEventBus(b *types.EventBus) { +func (cs *State) SetEventBus(b *events.EventBus) { cs.eventBus = b cs.blockExec.SetEventBus(b) } @@ -309,7 +314,7 @@ func (cs *State) SetTimeoutTicker(timeoutTicker TimeoutTicker) { } // LoadCommit loads the commit for a given height. -func (cs *State) LoadCommit(height int64) *types.Commit { +func (cs *State) LoadCommit(height int64) *metadata.Commit { cs.mtx.RLock() defer cs.mtx.RUnlock() @@ -500,7 +505,7 @@ func (cs *State) OpenWAL(walFile string) (WAL, error) { // TODO: should these return anything or let callers just use events? // AddVote inputs a vote. -func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (cs *State) AddVote(vote *types.Vote, peerID p2ptypes.NodeID) (added bool, err error) { if peerID == "" { cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""} } else { @@ -512,7 +517,7 @@ func (cs *State) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err } // SetProposal inputs a proposal. 
-func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) error { +func (cs *State) SetProposal(proposal *types.Proposal, peerID p2ptypes.NodeID) error { if peerID == "" { cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""} @@ -525,7 +530,7 @@ func (cs *State) SetProposal(proposal *types.Proposal, peerID types.NodeID) erro } // AddProposalBlockPart inputs a part of the proposal block. -func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Part, peerID types.NodeID) error { +func (cs *State) AddProposalBlockPart(height int64, round int32, part *metadata.Part, peerID p2ptypes.NodeID) error { if peerID == "" { cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""} @@ -540,9 +545,9 @@ func (cs *State) AddProposalBlockPart(height int64, round int32, part *types.Par // SetProposalAndBlock inputs the proposal and all block parts. func (cs *State) SetProposalAndBlock( proposal *types.Proposal, - block *types.Block, - parts *types.PartSet, - peerID types.NodeID, + block *block.Block, + parts *metadata.PartSet, + peerID p2ptypes.NodeID, ) error { if err := cs.SetProposal(proposal, peerID); err != nil { @@ -613,7 +618,7 @@ func (cs *State) reconstructLastCommit(state sm.State) { )) } - lastPrecommits := types.CommitToVoteSet(state.ChainID, commit, state.LastValidators) + lastPrecommits := types.VoteSetFromCommit(state.ChainID, commit, state.LastValidators) if !lastPrecommits.HasTwoThirdsMajority() { panic("failed to reconstruct last commit; does not have +2/3 maj") } @@ -744,7 +749,7 @@ func (cs *State) newStep() { cs.Logger.Error("failed publishing new round step", "err", err) } - cs.evsw.FireEvent(types.EventNewRoundStepValue, &cs.RoundState) + cs.evsw.FireEvent(events.EventNewRoundStepValue, &cs.RoundState) } } @@ -1161,8 +1166,8 @@ func (cs *State) isProposer(address []byte) bool { } func (cs *State) defaultDecideProposal(height int64, round int32) { - var block *types.Block - var blockParts *types.PartSet + var block *block.Block + var blockParts *metadata.PartSet // Decide on block if cs.ValidBlock != nil { @@ -1183,7 +1188,7 @@ func (cs *State) defaultDecideProposal(height int64, round int32) { } // Make proposal - propBlockID := types.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} + propBlockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: blockParts.Header()} proposal := types.NewProposal(height, round, cs.ValidRound, propBlockID) p := proposal.ToProto() @@ -1230,17 +1235,17 @@ func (cs *State) isProposalComplete() bool { // // NOTE: keep it side-effect free for clarity. // CONTRACT: cs.privValidator is not nil. -func (cs *State) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { +func (cs *State) createProposalBlock() (block *block.Block, blockParts *metadata.PartSet) { if cs.privValidator == nil { panic("entered createProposalBlock with privValidator being nil") } - var commit *types.Commit + var commit *metadata.Commit switch { case cs.Height == cs.state.InitialHeight: // We're creating a proposal for the first block. // The commit is empty, but not nil. - commit = types.NewCommit(0, 0, types.BlockID{}, nil) + commit = metadata.NewCommit(0, 0, metadata.BlockID{}, nil) case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit @@ -1306,7 +1311,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { // If ProposalBlock is nil, prevote nil. 
if cs.ProposalBlock == nil { logger.Debug("prevote step: ProposalBlock is nil") - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{}) return } @@ -1315,7 +1320,7 @@ func (cs *State) defaultDoPrevote(height int64, round int32) { if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("prevote step: ProposalBlock is invalid", "err", err) - cs.signAddVote(tmproto.PrevoteType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrevoteType, nil, metadata.PartSetHeader{}) return } @@ -1393,7 +1398,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { logger.Debug("precommit step; no +2/3 prevotes during enterPrecommit; precommitting nil") } - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{}) return } @@ -1423,7 +1428,7 @@ func (cs *State) enterPrecommit(height int64, round int32) { } } - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{}) return } @@ -1474,14 +1479,14 @@ func (cs *State) enterPrecommit(height int64, round int32) { if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { cs.ProposalBlock = nil - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader) } if err := cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()); err != nil { logger.Error("failed publishing event unlock", "err", err) } - cs.signAddVote(tmproto.PrecommitType, nil, types.PartSetHeader{}) + cs.signAddVote(tmproto.PrecommitType, nil, metadata.PartSetHeader{}) } // Enter: any +2/3 precommits for next round. @@ -1568,13 +1573,13 @@ func (cs *State) enterCommit(height int64, commitRound int32) { // We're getting the wrong block. // Set up ProposalBlockParts and keep waiting. cs.ProposalBlock = nil - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader) if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { logger.Error("failed publishing valid block", "err", err) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) + cs.evsw.FireEvent(events.EventValidBlockValue, &cs.RoundState) } } } @@ -1690,7 +1695,7 @@ func (cs *State) finalizeCommit(height int64) { // NOTE The block.AppHash wont reflect these txs until the next block. stateCopy, err := cs.blockExec.ApplyBlock( stateCopy, - types.BlockID{ + metadata.BlockID{ Hash: block.Hash(), PartSetHeader: blockParts.Header(), }, @@ -1726,7 +1731,7 @@ func (cs *State) finalizeCommit(height int64) { // * cs.StartTime is set to when we will start round0. 
} -func (cs *State) RecordMetrics(height int64, block *types.Block) { +func (cs *State) RecordMetrics(height int64, block *block.Block) { cs.metrics.Validators.Set(float64(cs.Validators.Size())) cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) @@ -1791,7 +1796,7 @@ func (cs *State) RecordMetrics(height int64, block *types.Block) { ) for _, ev := range block.Evidence.Evidence { - if dve, ok := ev.(*types.DuplicateVoteEvidence); ok { + if dve, ok := ev.(*evidence.DuplicateVoteEvidence); ok { if _, val := cs.Validators.GetByAddress(dve.VoteA.ValidatorAddress); val != nil { byzantineValidatorsCount++ byzantineValidatorsPower += val.VotingPower @@ -1850,7 +1855,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { // This happens if we're already in cstypes.RoundStepCommit or if there is a valid block in the current round. // TODO: We can check if Proposal is for a different block as this is a sign of misbehavior! if cs.ProposalBlockParts == nil { - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) + cs.ProposalBlockParts = metadata.NewPartSetFromHeader(proposal.BlockID.PartSetHeader) } cs.Logger.Info("received proposal", "proposal", proposal) @@ -1860,7 +1865,7 @@ func (cs *State) defaultSetProposal(proposal *types.Proposal) error { // NOTE: block is not necessarily valid. // Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, // once we have the full block. -func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID) (added bool, err error) { +func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID p2ptypes.NodeID) (added bool, err error) { height, round, part := msg.Height, msg.Round, msg.Part // Blocks might be reused, so round mismatch is OK @@ -1904,7 +1909,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID return added, err } - block, err := types.BlockFromProto(pbb) + block, err := block.BlockFromProto(pbb) if err != nil { return added, err } @@ -1958,7 +1963,7 @@ func (cs *State) addProposalBlockPart(msg *BlockPartMessage, peerID types.NodeID } // Attempt to add the vote. 
if its a duplicate signature, dupeout the validator -func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) { +func (cs *State) tryAddVote(vote *types.Vote, peerID p2ptypes.NodeID) (bool, error) { added, err := cs.addVote(vote, peerID) if err != nil { // If the vote height is off, we'll just ignore it, @@ -2006,7 +2011,7 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID types.NodeID) (bool, error) return added, nil } -func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (cs *State) addVote(vote *types.Vote, peerID p2ptypes.NodeID) (added bool, err error) { cs.Logger.Debug( "adding vote", "vote_height", vote.Height, @@ -2030,11 +2035,11 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err } cs.Logger.Debug("added vote to last precommits", "last_commit", cs.LastCommit.StringShort()) - if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + if err := cs.eventBus.PublishEventVote(events.EventDataVote{Vote: vote}); err != nil { return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(events.EventVoteValue, vote) // if we can skip timeoutCommit and have all the votes now, if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { @@ -2060,10 +2065,10 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err return } - if err := cs.eventBus.PublishEventVote(types.EventDataVote{Vote: vote}); err != nil { + if err := cs.eventBus.PublishEventVote(events.EventDataVote{Vote: vote}); err != nil { return added, err } - cs.evsw.FireEvent(types.EventVoteValue, vote) + cs.evsw.FireEvent(events.EventVoteValue, vote) switch vote.Type { case tmproto.PrevoteType: @@ -2114,10 +2119,10 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err } if !cs.ProposalBlockParts.HasHeader(blockID.PartSetHeader) { - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartSetHeader) + cs.ProposalBlockParts = metadata.NewPartSetFromHeader(blockID.PartSetHeader) } - cs.evsw.FireEvent(types.EventValidBlockValue, &cs.RoundState) + cs.evsw.FireEvent(events.EventValidBlockValue, &cs.RoundState) if err := cs.eventBus.PublishEventValidBlock(cs.RoundStateEvent()); err != nil { return added, err } @@ -2184,7 +2189,7 @@ func (cs *State) addVote(vote *types.Vote, peerID types.NodeID) (added bool, err func (cs *State) signVote( msgType tmproto.SignedMsgType, hash []byte, - header types.PartSetHeader, + header metadata.PartSetHeader, ) (*types.Vote, error) { // Flush the WAL. Otherwise, we may not recompute the same vote to sign, // and the privValidator will refuse to sign anything. 
@@ -2206,7 +2211,7 @@ func (cs *State) signVote( Round: cs.Round, Timestamp: cs.voteTime(), Type: msgType, - BlockID: types.BlockID{Hash: hash, PartSetHeader: header}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: header}, } v := vote.ToProto() @@ -2259,7 +2264,7 @@ func (cs *State) voteTime() time.Time { } // sign the vote and publish on internalMsgQueue -func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { +func (cs *State) signAddVote(msgType tmproto.SignedMsgType, hash []byte, header metadata.PartSetHeader) *types.Vote { if cs.privValidator == nil { // the node does not have a key return nil } @@ -2332,7 +2337,7 @@ func (cs *State) checkDoubleSigningRisk(height int64) error { lastCommit := cs.LoadCommit(height - i) if lastCommit != nil { for sigIdx, s := range lastCommit.Signatures { - if s.BlockIDFlag == types.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { + if s.BlockIDFlag == metadata.BlockIDFlagCommit && bytes.Equal(s.ValidatorAddress, valAddr) { cs.Logger.Info("found signature from the same key", "sig", s, "idx", sigIdx, "height", height-i) return ErrSignatureFoundInPastBlocks } diff --git a/internal/consensus/state_test.go b/internal/consensus/state_test.go index b3b7c81a3..ee2d104e0 100644 --- a/internal/consensus/state_test.go +++ b/internal/consensus/state_test.go @@ -17,8 +17,11 @@ import ( "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) /* @@ -61,8 +64,8 @@ func TestStateProposerSelection0(t *testing.T) { cs1, vss := randState(config, 4) height, round := cs1.Height, cs1.Round - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) startTestRound(cs1, height, round) @@ -102,7 +105,7 @@ func TestStateProposerSelection2(t *testing.T) { cs1, vss := randState(config, 4) // test needs more work for more than 3 validators height := cs1.Height - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) // this time we jump in at round 2 incrementRound(vss[1:]...) 
@@ -144,7 +147,7 @@ func TestStateEnterProposeNoPrivValidator(t *testing.T) { height, round := cs.Height, cs.Round // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) + timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose) startTestRound(cs, height, round) @@ -165,8 +168,8 @@ func TestStateEnterProposeYesPrivValidator(t *testing.T) { // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + timeoutCh := subscribe(cs.eventBus, events.EventQueryTimeoutPropose) + proposalCh := subscribe(cs.eventBus, events.EventQueryCompleteProposal) cs.enterNewRound(height, round) cs.startRoutines(3) @@ -196,10 +199,10 @@ func TestStateBadProposal(t *testing.T) { height, round := cs1.Height, cs1.Round vs2 := vss[1] - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + voteCh := subscribe(cs1.eventBus, events.EventQueryVote) propBlock, _ := cs1.createProposalBlock() // changeProposer(t, cs1, vs2) @@ -215,8 +218,8 @@ func TestStateBadProposal(t *testing.T) { stateHash[0] = (stateHash[0] + 1) % 255 propBlock.AppHash = stateHash propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(vs2.Height, round, -1, blockID) + blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := consensus.NewProposal(vs2.Height, round, -1, blockID) p := proposal.ToProto() if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -257,13 +260,13 @@ func TestStateOversizedBlock(t *testing.T) { height, round := cs1.Height, cs1.Round vs2 := vss[1] - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + voteCh := subscribe(cs1.eventBus, events.EventQueryVote) propBlock, _ := cs1.createProposalBlock() - propBlock.Data.Txs = []types.Tx{tmrand.Bytes(2001)} + propBlock.Data.Txs = []mempool.Tx{tmrand.Bytes(2001)} propBlock.Header.DataHash = propBlock.Data.Hash() // make the second validator the proposer by incrementing round @@ -271,8 +274,8 @@ func TestStateOversizedBlock(t *testing.T) { incrementRound(vss[1:]...) 
propBlockParts := propBlock.MakePartSet(partSize) - blockID := types.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} - proposal := types.NewProposal(height, round, -1, blockID) + blockID := metadata.BlockID{Hash: propBlock.Hash(), PartSetHeader: propBlockParts.Header()} + proposal := consensus.NewProposal(height, round, -1, blockID) p := proposal.ToProto() if err := vs2.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal("failed to sign bad proposal", err) @@ -324,16 +327,16 @@ func TestStateFullRound1(t *testing.T) { if err := cs.eventBus.Stop(); err != nil { t.Error(err) } - eventBus := types.NewEventBusWithBufferCapacity(0) + eventBus := events.NewEventBusWithBufferCapacity(0) eventBus.SetLogger(log.TestingLogger().With("module", "events")) cs.SetEventBus(eventBus) if err := eventBus.Start(); err != nil { t.Error(err) } - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + voteCh := subscribeUnBuffered(cs.eventBus, events.EventQueryVote) + propCh := subscribe(cs.eventBus, events.EventQueryCompleteProposal) + newRoundCh := subscribe(cs.eventBus, events.EventQueryNewRound) // Maybe it would be better to call explicitly startRoutines(4) startTestRound(cs, height, round) @@ -361,7 +364,7 @@ func TestStateFullRoundNil(t *testing.T) { cs, vss := randState(config, 1) height, round := cs.Height, cs.Round - voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + voteCh := subscribeUnBuffered(cs.eventBus, events.EventQueryVote) cs.enterPrevote(height, round) cs.startRoutines(4) @@ -382,8 +385,8 @@ func TestStateFullRound2(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + voteCh := subscribeUnBuffered(cs1.eventBus, events.EventQueryVote) + newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlock) // start round and wait for propose and prevote startTestRound(cs1, height, round) @@ -424,13 +427,13 @@ func TestStateLockNoPOL(t *testing.T) { vs2 := vss[1] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribeUnBuffered(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + voteCh := subscribeUnBuffered(cs1.eventBus, events.EventQueryVote) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) /* Round1 (cs1, B) // B B // B B2 @@ -614,16 +617,16 @@ func TestStateLockPOLRelock(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, 
events.EventQueryTimeoutWait) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader) // everything done from perspective of cs1 @@ -651,7 +654,7 @@ func TestStateLockPOLRelock(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) @@ -713,12 +716,12 @@ func TestStateLockPOLUnlock(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -750,7 +753,7 @@ func TestStateLockPOLUnlock(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs4) signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) // before we time out into new round, set next proposal block @@ -782,7 +785,7 @@ func TestStateLockPOLUnlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], lockedBlockHash) // now lets add prevotes from everyone else for nil (a polka!) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) // the polka makes us unlock and precommit nil ensureNewUnlock(unlockCh, height, round) @@ -792,7 +795,7 @@ func TestStateLockPOLUnlock(t *testing.T) { // NOTE: since we don't relock on nil, the lock round is -1 validatePrecommit(t, cs1, round, -1, vss[0], nil, nil) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3) ensureNewRound(newRoundCh, height, round+1) } @@ -807,15 +810,15 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) // everything done from perspective of cs1 /* @@ -840,7 +843,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], firstBlockHash, firstBlockHash) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal cs2 := newState(cs1.state, vs2, kvstore.NewApplication()) @@ -884,7 +887,7 @@ func TestStateLockPOLUnlockOnUnknownBlock(t *testing.T) { } // more prevote creating a majority on the new block and this is then committed - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) // before we timeout to the new round set the new proposal cs3 := newState(cs1.state, vs3, kvstore.NewApplication()) @@ -935,12 +938,12 @@ func TestStateLockPOLSafety1(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -965,7 +968,7 @@ func TestStateLockPOLSafety1(t *testing.T) { t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) 
// we do see them precommit nil - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) // cs1 precommit nil ensurePrecommit(voteCh, height, round) @@ -1011,7 +1014,7 @@ func TestStateLockPOLSafety1(t *testing.T) { // we should have precommitted validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1033,7 +1036,7 @@ func TestStateLockPOLSafety1(t *testing.T) { // we should prevote what we're locked on validatePrevote(t, cs1, round, vss[0], propBlockHash) - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) + newStepCh := subscribe(cs1.eventBus, events.EventQueryNewRoundStep) // before prevotes from the previous round are added // add prevotes from the earlier round @@ -1058,12 +1061,12 @@ func TestStateLockPOLSafety2(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1074,7 +1077,7 @@ func TestStateLockPOLSafety2(t *testing.T) { _, propBlock0 := decideProposal(cs1, vss[0], height, round) propBlockHash0 := propBlock0.Hash() propBlockParts0 := propBlock0.MakePartSet(partSize) - propBlockID0 := types.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} + propBlockID0 := metadata.BlockID{Hash: propBlockHash0, PartSetHeader: propBlockParts0.Header()} // the others sign a polka but we don't see it prevotes := signVotes(config, tmproto.PrevoteType, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) @@ -1107,7 +1110,7 @@ func TestStateLockPOLSafety2(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], propBlockHash1, propBlockHash1) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs4) signAddVotes(config, cs1, tmproto.PrecommitType, propBlockHash1, propBlockParts1.Header(), vs3) incrementRound(vs2, vs3, vs4) @@ -1117,7 +1120,7 @@ func TestStateLockPOLSafety2(t *testing.T) { round++ // moving to the next round // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, round, 0, propBlockID0) + newProp := consensus.NewProposal(height, round, 0, propBlockID0) p := newProp.ToProto() if err := vs3.SignProposal(context.Background(), config.ChainID(), p); err != nil { t.Fatal(err) @@ -1157,13 +1160,13 @@ func TestProposeValidBlock(t *testing.T) { vs2, vs3, 
vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + unlockCh := subscribe(cs1.eventBus, events.EventQueryUnlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1188,7 +1191,7 @@ func TestProposeValidBlock(t *testing.T) { // we should have precommitted validatePrecommit(t, cs1, round, round, vss[0], propBlockHash, propBlockHash) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) @@ -1205,7 +1208,7 @@ func TestProposeValidBlock(t *testing.T) { ensurePrevote(voteCh, height, round) validatePrevote(t, cs1, round, vss[0], propBlockHash) - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) ensureNewUnlock(unlockCh, height, round) @@ -1216,7 +1219,7 @@ func TestProposeValidBlock(t *testing.T) { incrementRound(vs2, vs3, vs4) incrementRound(vs2, vs3, vs4) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) round += 2 // moving to the next round @@ -1249,12 +1252,12 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1277,7 +1280,7 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { signAddVotes(config, cs1, tmproto.PrevoteType, propBlockHash, propBlockParts.Header(), vs2) // vs3 send prevote nil - signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs3) + signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs3) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Prevote(round).Nanoseconds()) @@ -1313,17 +1316,17 @@ func 
TestSetValidBlockOnDelayedProposal(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) round++ // move to round in which P0 is not proposer incrementRound(vs2, vs3, vs4) @@ -1371,14 +1374,14 @@ func TestWaitingTimeoutOnNilPolka(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) // start round startTestRound(cs1, height, round) ensureNewRound(newRoundCh, height, round) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutWaitCh, height, round, cs1.config.Precommit(round).Nanoseconds()) ensureNewRound(newRoundCh, height, round+1) @@ -1394,8 +1397,8 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1408,7 +1411,7 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1432,8 +1435,8 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1446,7 +1449,7 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { ensurePrevote(voteCh, height, round) incrementRound(vss[1:]...) - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) round++ // moving to the next round ensureNewRound(newRoundCh, height, round) @@ -1470,8 +1473,8 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1482,7 +1485,7 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { ensureNewRound(newRoundCh, height, round) incrementRound(vss[1:]...) 
- signAddVotes(config, cs1, tmproto.PrevoteType, nil, types.PartSetHeader{}, vs2, vs3, vs4) + signAddVotes(config, cs1, tmproto.PrevoteType, nil, metadata.PartSetHeader{}, vs2, vs3, vs4) ensureNewTimeout(timeoutProposeCh, height, round, cs1.config.Propose(round).Nanoseconds()) @@ -1501,10 +1504,10 @@ func TestEmitNewValidBlockEventOnCommitWithoutBlock(t *testing.T) { incrementRound(vs2, vs3, vs4) - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock) _, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) propBlockHash := propBlock.Hash() @@ -1535,11 +1538,11 @@ func TestCommitFromPreviousRound(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, int32(1) - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + validBlockCh := subscribe(cs1.eventBus, events.EventQueryValidBlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round) propBlockHash := propBlock.Hash() @@ -1593,12 +1596,12 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - precommitTimeoutCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutProposeCh := subscribe(cs1.eventBus, events.EventQueryTimeoutPropose) + precommitTimeoutCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + newBlockHeader := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1623,7 +1626,7 @@ func TestStartNextHeightCorrectlyAfterTimeout(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2) signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) // wait till timeout occurs @@ -1655,12 +1658,12 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - 
newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + newBlockHeader := subscribe(cs1.eventBus, events.EventQueryNewBlockHeader) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1684,7 +1687,7 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], theBlockHash, theBlockHash) // add precommits - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2) signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs3) signAddVotes(config, cs1, tmproto.PrecommitType, theBlockHash, theBlockParts, vs4) @@ -1715,9 +1718,9 @@ func TestStateSlashingPrevotes(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -1750,9 +1753,9 @@ func TestStateSlashingPrecommits(t *testing.T) { vs2 := vss[1] - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) // start round and wait for propose and prevote @@ -1796,12 +1799,12 @@ func TestStateHalt1(t *testing.T) { cs1, vss := randState(config, 4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] height, round := cs1.Height, cs1.Round - partSize := types.BlockPartSizeBytes + partSize := metadata.BlockPartSizeBytes - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) + proposalCh := subscribe(cs1.eventBus, events.EventQueryCompleteProposal) + timeoutWaitCh := subscribe(cs1.eventBus, events.EventQueryTimeoutWait) + newRoundCh := subscribe(cs1.eventBus, events.EventQueryNewRound) + newBlockCh := subscribe(cs1.eventBus, events.EventQueryNewBlock) pv1, err := cs1.privValidator.GetPubKey(context.Background()) require.NoError(t, err) addr := pv1.Address() @@ -1825,7 +1828,7 @@ func TestStateHalt1(t *testing.T) { validatePrecommit(t, cs1, round, round, vss[0], propBlock.Hash(), propBlock.Hash()) // add precommits from the rest - signAddVotes(config, cs1, tmproto.PrecommitType, nil, types.PartSetHeader{}, vs2) // didnt receive proposal + signAddVotes(config, cs1, tmproto.PrecommitType, nil, metadata.PartSetHeader{}, vs2) // didnt receive proposal signAddVotes(config, cs1, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header(), vs3) // we receive this later, but vs3 might receive it 
earlier and with ours will go to commit! precommit4 := signVote(vs4, config, tmproto.PrecommitType, propBlock.Hash(), propBlockParts.Header()) @@ -1867,14 +1870,14 @@ func TestStateOutputsBlockPartsStats(t *testing.T) { peer := p2pmock.NewPeer(nil) // 1) new block part - parts := types.NewPartSetFromData(tmrand.Bytes(100), 10) + parts := metadata.NewPartSetFromData(tmrand.Bytes(100), 10) msg := &BlockPartMessage{ Height: 1, Round: 0, Part: parts.GetPart(0), } - cs.ProposalBlockParts = types.NewPartSetFromHeader(parts.Header()) + cs.ProposalBlockParts = metadata.NewPartSetFromHeader(parts.Header()) cs.handleMsg(msgInfo{msg, peer.ID()}) statsMessage := <-cs.statsMsgQueue @@ -1913,7 +1916,7 @@ func TestStateOutputVoteStats(t *testing.T) { randBytes := tmrand.Bytes(tmhash.Size) - vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) + vote := signVote(vss[1], config, tmproto.PrecommitType, randBytes, metadata.PartSetHeader{}) voteMessage := &VoteMessage{vote} cs.handleMsg(msgInfo{voteMessage, peer.ID()}) @@ -1927,7 +1930,7 @@ func TestStateOutputVoteStats(t *testing.T) { // sending the vote for the bigger height incrementHeight(vss[1]) - vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, types.PartSetHeader{}) + vote = signVote(vss[1], config, tmproto.PrecommitType, randBytes, metadata.PartSetHeader{}) cs.handleMsg(msgInfo{&VoteMessage{vote}, peer.ID()}) @@ -1950,21 +1953,21 @@ func TestSignSameVoteTwice(t *testing.T) { config, tmproto.PrecommitType, randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, + metadata.PartSetHeader{Total: 10, Hash: randBytes}, ) vote2 := signVote(vss[1], config, tmproto.PrecommitType, randBytes, - types.PartSetHeader{Total: 10, Hash: randBytes}, + metadata.PartSetHeader{Total: 10, Hash: randBytes}, ) require.Equal(t, vote, vote2) } // subscribe subscribes test client to the given query and returns a channel with cap = 1. -func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { +func subscribe(eventBus *events.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { sub, err := eventBus.Subscribe(context.Background(), testSubscriber, q) if err != nil { panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) @@ -1973,7 +1976,7 @@ func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Messa } // subscribe subscribes test client to the given query and returns a channel with cap = 0. 
-func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { +func subscribeUnBuffered(eventBus *events.EventBus, q tmpubsub.Query) <-chan tmpubsub.Message { sub, err := eventBus.SubscribeUnbuffered(context.Background(), testSubscriber, q) if err != nil { panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) diff --git a/internal/consensus/types/height_vote_set.go b/internal/consensus/types/height_vote_set.go index 86b3e2c4f..4ff74d41d 100644 --- a/internal/consensus/types/height_vote_set.go +++ b/internal/consensus/types/height_vote_set.go @@ -8,13 +8,15 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) type RoundVoteSet struct { - Prevotes *types.VoteSet - Precommits *types.VoteSet + Prevotes *consensus.VoteSet + Precommits *consensus.VoteSet } var ( @@ -40,15 +42,15 @@ One for their LastCommit round, and another for the official commit round. type HeightVoteSet struct { chainID string height int64 - valSet *types.ValidatorSet + valSet *consensus.ValidatorSet mtx sync.Mutex - round int32 // max tracked round - roundVoteSets map[int32]RoundVoteSet // keys: [0...round] - peerCatchupRounds map[types.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds + round int32 // max tracked round + roundVoteSets map[int32]RoundVoteSet // keys: [0...round] + peerCatchupRounds map[p2p.NodeID][]int32 // keys: peer.ID; values: at most 2 rounds } -func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { +func NewHeightVoteSet(chainID string, height int64, valSet *consensus.ValidatorSet) *HeightVoteSet { hvs := &HeightVoteSet{ chainID: chainID, } @@ -56,14 +58,14 @@ func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) return hvs } -func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { +func (hvs *HeightVoteSet) Reset(height int64, valSet *consensus.ValidatorSet) { hvs.mtx.Lock() defer hvs.mtx.Unlock() hvs.height = height hvs.valSet = valSet hvs.roundVoteSets = make(map[int32]RoundVoteSet) - hvs.peerCatchupRounds = make(map[types.NodeID][]int32) + hvs.peerCatchupRounds = make(map[p2p.NodeID][]int32) hvs.addRound(0) hvs.round = 0 @@ -103,8 +105,8 @@ func (hvs *HeightVoteSet) addRound(round int32) { panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) + prevotes := consensus.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrevoteType, hvs.valSet) + precommits := consensus.NewVoteSet(hvs.chainID, hvs.height, round, tmproto.PrecommitType, hvs.valSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, @@ -113,10 +115,10 @@ func (hvs *HeightVoteSet) addRound(round int32) { // Duplicate votes return added=false, err=nil. // By convention, peerID is "" if origin is self. 
-func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added bool, err error) { +func (hvs *HeightVoteSet) AddVote(vote *consensus.Vote, peerID p2p.NodeID) (added bool, err error) { hvs.mtx.Lock() defer hvs.mtx.Unlock() - if !types.IsVoteTypeValid(vote.Type) { + if !consensus.IsVoteTypeValid(vote.Type) { return } voteSet := hvs.getVoteSet(vote.Round, vote.Type) @@ -135,13 +137,13 @@ func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID types.NodeID) (added return } -func (hvs *HeightVoteSet) Prevotes(round int32) *types.VoteSet { +func (hvs *HeightVoteSet) Prevotes(round int32) *consensus.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() return hvs.getVoteSet(round, tmproto.PrevoteType) } -func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet { +func (hvs *HeightVoteSet) Precommits(round int32) *consensus.VoteSet { hvs.mtx.Lock() defer hvs.mtx.Unlock() return hvs.getVoteSet(round, tmproto.PrecommitType) @@ -149,7 +151,7 @@ func (hvs *HeightVoteSet) Precommits(round int32) *types.VoteSet { // Last round and blockID that has +2/3 prevotes for a particular block or nil. // Returns -1 if no such round exists. -func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) { +func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID metadata.BlockID) { hvs.mtx.Lock() defer hvs.mtx.Unlock() for r := hvs.round; r >= 0; r-- { @@ -159,10 +161,10 @@ func (hvs *HeightVoteSet) POLInfo() (polRound int32, polBlockID types.BlockID) { return r, polBlockID } } - return -1, types.BlockID{} + return -1, metadata.BlockID{} } -func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *types.VoteSet { +func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType) *consensus.VoteSet { rvs, ok := hvs.roundVoteSets[round] if !ok { return nil @@ -184,18 +186,18 @@ func (hvs *HeightVoteSet) getVoteSet(round int32, voteType tmproto.SignedMsgType func (hvs *HeightVoteSet) SetPeerMaj23( round int32, voteType tmproto.SignedMsgType, - peerID types.NodeID, - blockID types.BlockID) error { + peerID p2p.NodeID, + blockID metadata.BlockID) error { hvs.mtx.Lock() defer hvs.mtx.Unlock() - if !types.IsVoteTypeValid(voteType) { + if !consensus.IsVoteTypeValid(voteType) { return fmt.Errorf("setPeerMaj23: Invalid vote type %X", voteType) } voteSet := hvs.getVoteSet(round, voteType) if voteSet == nil { return nil // something we don't know about yet } - return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID) + return voteSet.SetPeerMaj23(consensus.P2PID(peerID), blockID) } //--------------------------------------------------------- diff --git a/internal/consensus/types/height_vote_set_test.go b/internal/consensus/types/height_vote_set_test.go index a9e309b4f..023aa7a69 100644 --- a/internal/consensus/types/height_vote_set_test.go +++ b/internal/consensus/types/height_vote_set_test.go @@ -11,8 +11,9 @@ import ( "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) var config *cfg.Config // NOTE: must be reset for each _test.go file @@ -57,7 +58,7 @@ func TestPeerCatchupRounds(t *testing.T) { } -func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []types.PrivValidator) *types.Vote { +func 
makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []consensus.PrivValidator) *consensus.Vote { privVal := privVals[valIndex] pubKey, err := privVal.GetPubKey(context.Background()) if err != nil { @@ -66,14 +67,14 @@ func makeVoteHR(t *testing.T, height int64, valIndex, round int32, privVals []ty randBytes := tmrand.Bytes(tmhash.Size) - vote := &types.Vote{ + vote := &consensus.Vote{ ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, Timestamp: tmtime.Now(), Type: tmproto.PrecommitType, - BlockID: types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}}, + BlockID: metadata.BlockID{Hash: randBytes, PartSetHeader: metadata.PartSetHeader{}}, } chainID := config.ChainID() diff --git a/internal/consensus/types/peer_round_state.go b/internal/consensus/types/peer_round_state.go index 9d294d9af..8a05e8345 100644 --- a/internal/consensus/types/peer_round_state.go +++ b/internal/consensus/types/peer_round_state.go @@ -5,7 +5,7 @@ import ( "time" "github.com/tendermint/tendermint/libs/bits" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/metadata" ) //----------------------------------------------------------------------------- @@ -21,9 +21,9 @@ type PeerRoundState struct { StartTime time.Time `json:"start_time"` // True if peer has proposal for this round - Proposal bool `json:"proposal"` - ProposalBlockPartSetHeader types.PartSetHeader `json:"proposal_block_part_set_header"` - ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"` + Proposal bool `json:"proposal"` + ProposalBlockPartSetHeader metadata.PartSetHeader `json:"proposal_block_part_set_header"` + ProposalBlockParts *bits.BitArray `json:"proposal_block_parts"` // Proposal's POL round. -1 if none. 
ProposalPOLRound int32 `json:"proposal_pol_round"` @@ -57,7 +57,7 @@ func (prs PeerRoundState) Copy() PeerRoundState { hashCopy := make([]byte, len(headerHash)) copy(hashCopy, headerHash) - prs.ProposalBlockPartSetHeader = types.PartSetHeader{ + prs.ProposalBlockPartSetHeader = metadata.PartSetHeader{ Total: prs.ProposalBlockPartSetHeader.Total, Hash: hashCopy, } diff --git a/internal/consensus/types/round_state.go b/internal/consensus/types/round_state.go index 9e67b76c0..ffddaefe6 100644 --- a/internal/consensus/types/round_state.go +++ b/internal/consensus/types/round_state.go @@ -6,7 +6,10 @@ import ( "time" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" ) //----------------------------------------------------------------------------- @@ -71,37 +74,37 @@ type RoundState struct { StartTime time.Time `json:"start_time"` // Subjective time when +2/3 precommits for Block at Round were found - CommitTime time.Time `json:"commit_time"` - Validators *types.ValidatorSet `json:"validators"` - Proposal *types.Proposal `json:"proposal"` - ProposalBlock *types.Block `json:"proposal_block"` - ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - LockedRound int32 `json:"locked_round"` - LockedBlock *types.Block `json:"locked_block"` - LockedBlockParts *types.PartSet `json:"locked_block_parts"` + CommitTime time.Time `json:"commit_time"` + Validators *consensus.ValidatorSet `json:"validators"` + Proposal *consensus.Proposal `json:"proposal"` + ProposalBlock *block.Block `json:"proposal_block"` + ProposalBlockParts *metadata.PartSet `json:"proposal_block_parts"` + LockedRound int32 `json:"locked_round"` + LockedBlock *block.Block `json:"locked_block"` + LockedBlockParts *metadata.PartSet `json:"locked_block_parts"` // Last known round with POL for non-nil valid block. ValidRound int32 `json:"valid_round"` - ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. + ValidBlock *block.Block `json:"valid_block"` // Last known block of POL mentioned above. // Last known block parts of POL mentioned above. 
- ValidBlockParts *types.PartSet `json:"valid_block_parts"` - Votes *HeightVoteSet `json:"votes"` - CommitRound int32 `json:"commit_round"` // - LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 - LastValidators *types.ValidatorSet `json:"last_validators"` - TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` + ValidBlockParts *metadata.PartSet `json:"valid_block_parts"` + Votes *HeightVoteSet `json:"votes"` + CommitRound int32 `json:"commit_round"` // + LastCommit *consensus.VoteSet `json:"last_commit"` // Last precommits at Height-1 + LastValidators *consensus.ValidatorSet `json:"last_validators"` + TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` } // Compressed version of the RoundState for use in RPC type RoundStateSimple struct { - HeightRoundStep string `json:"height/round/step"` - StartTime time.Time `json:"start_time"` - ProposalBlockHash bytes.HexBytes `json:"proposal_block_hash"` - LockedBlockHash bytes.HexBytes `json:"locked_block_hash"` - ValidBlockHash bytes.HexBytes `json:"valid_block_hash"` - Votes json.RawMessage `json:"height_vote_set"` - Proposer types.ValidatorInfo `json:"proposer"` + HeightRoundStep string `json:"height/round/step"` + StartTime time.Time `json:"start_time"` + ProposalBlockHash bytes.HexBytes `json:"proposal_block_hash"` + LockedBlockHash bytes.HexBytes `json:"locked_block_hash"` + ValidBlockHash bytes.HexBytes `json:"valid_block_hash"` + Votes json.RawMessage `json:"height_vote_set"` + Proposer consensus.ValidatorInfo `json:"proposer"` } // Compress the RoundState to RoundStateSimple @@ -121,7 +124,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { LockedBlockHash: rs.LockedBlock.Hash(), ValidBlockHash: rs.ValidBlock.Hash(), Votes: votesJSON, - Proposer: types.ValidatorInfo{ + Proposer: consensus.ValidatorInfo{ Address: addr, Index: idx, }, @@ -129,15 +132,15 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { } // NewRoundEvent returns the RoundState with proposer information as an event. -func (rs *RoundState) NewRoundEvent() types.EventDataNewRound { +func (rs *RoundState) NewRoundEvent() events.EventDataNewRound { addr := rs.Validators.GetProposer().Address idx, _ := rs.Validators.GetByAddress(addr) - return types.EventDataNewRound{ + return events.EventDataNewRound{ Height: rs.Height, Round: rs.Round, Step: rs.Step.String(), - Proposer: types.ValidatorInfo{ + Proposer: consensus.ValidatorInfo{ Address: addr, Index: idx, }, @@ -145,15 +148,15 @@ func (rs *RoundState) NewRoundEvent() types.EventDataNewRound { } // CompleteProposalEvent returns information about a proposed block as an event. -func (rs *RoundState) CompleteProposalEvent() types.EventDataCompleteProposal { +func (rs *RoundState) CompleteProposalEvent() events.EventDataCompleteProposal { // We must construct BlockID from ProposalBlock and ProposalBlockParts // cs.Proposal is not guaranteed to be set when this function is called - blockID := types.BlockID{ + blockID := metadata.BlockID{ Hash: rs.ProposalBlock.Hash(), PartSetHeader: rs.ProposalBlockParts.Header(), } - return types.EventDataCompleteProposal{ + return events.EventDataCompleteProposal{ Height: rs.Height, Round: rs.Round, Step: rs.Step.String(), @@ -162,8 +165,8 @@ func (rs *RoundState) CompleteProposalEvent() types.EventDataCompleteProposal { } // RoundStateEvent returns the H/R/S of the RoundState as an event. 
-func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { - return types.EventDataRoundState{ +func (rs *RoundState) RoundStateEvent() events.EventDataRoundState { + return events.EventDataRoundState{ Height: rs.Height, Round: rs.Round, Step: rs.Step.String(), diff --git a/internal/consensus/wal_generator.go b/internal/consensus/wal_generator.go index 81c2125ca..b4a99e78b 100644 --- a/internal/consensus/wal_generator.go +++ b/internal/consensus/wal_generator.go @@ -16,11 +16,12 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) // WALGenerateNBlocks generates a consensus WAL. It does this by spinning up a @@ -46,7 +47,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { if err != nil { return err } - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) + genDoc, err := consensus.GenesisDocFromFile(config.GenesisFile()) if err != nil { return fmt.Errorf("failed to read genesis file: %w", err) } @@ -75,7 +76,7 @@ func WALGenerateNBlocks(t *testing.T, wr io.Writer, numBlocks int) (err error) { } }) - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err != nil { return fmt.Errorf("failed to start event bus: %w", err) diff --git a/internal/consensus/wal_test.go b/internal/consensus/wal_test.go index 180af5f34..eb1eb3512 100644 --- a/internal/consensus/wal_test.go +++ b/internal/consensus/wal_test.go @@ -17,7 +17,8 @@ import ( "github.com/tendermint/tendermint/internal/libs/autofile" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" - tmtypes "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" ) const ( @@ -71,7 +72,7 @@ func TestWALTruncate(t *testing.T) { dec := NewWALDecoder(gr) msg, err := dec.Decode() assert.NoError(t, err, "expected to decode a message") - rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + rs, ok := msg.Msg.(events.EventDataRoundState) assert.True(t, ok, "expected message of type EventDataRoundState") assert.Equal(t, rs.Height, h+1, "wrong height") } @@ -81,7 +82,7 @@ func TestWALEncoderDecoder(t *testing.T) { msgs := []TimedWALMessage{ {Time: now, Msg: EndHeightMessage{0}}, {Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, - {Time: now, Msg: tmtypes.EventDataRoundState{Height: 1, Round: 1, Step: ""}}, + {Time: now, Msg: events.EventDataRoundState{Height: 1, Round: 1, Step: ""}}, } b := new(bytes.Buffer) @@ -124,7 +125,7 @@ func TestWALWrite(t *testing.T) { msg := &BlockPartMessage{ Height: 1, Round: 1, - Part: &tmtypes.Part{ + Part: &metadata.Part{ Index: 1, Bytes: make([]byte, 1), Proof: merkle.Proof{ @@ -164,7 +165,7 @@ func TestWALSearchForEndHeight(t *testing.T) { dec := NewWALDecoder(gr) msg, err := dec.Decode() assert.NoError(t, err, "expected to decode a message") - rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + rs, ok := msg.Msg.(events.EventDataRoundState) assert.True(t, ok, "expected message of type 
EventDataRoundState") assert.Equal(t, rs.Height, h+1, "wrong height") } diff --git a/internal/evidence/mocks/block_store.go b/internal/evidence/mocks/block_store.go index ef3346b2a..fa6e4ff95 100644 --- a/internal/evidence/mocks/block_store.go +++ b/internal/evidence/mocks/block_store.go @@ -3,8 +3,11 @@ package mocks import ( + block "github.com/tendermint/tendermint/pkg/block" + + metadata "github.com/tendermint/tendermint/pkg/metadata" + mock "github.com/stretchr/testify/mock" - types "github.com/tendermint/tendermint/types" ) // BlockStore is an autogenerated mock type for the BlockStore type @@ -27,15 +30,15 @@ func (_m *BlockStore) Height() int64 { } // LoadBlockCommit provides a mock function with given fields: height -func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { +func (_m *BlockStore) LoadBlockCommit(height int64) *metadata.Commit { ret := _m.Called(height) - var r0 *types.Commit - if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + var r0 *metadata.Commit + if rf, ok := ret.Get(0).(func(int64) *metadata.Commit); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Commit) + r0 = ret.Get(0).(*metadata.Commit) } } @@ -43,15 +46,15 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { } // LoadBlockMeta provides a mock function with given fields: height -func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { +func (_m *BlockStore) LoadBlockMeta(height int64) *block.BlockMeta { ret := _m.Called(height) - var r0 *types.BlockMeta - if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { + var r0 *block.BlockMeta + if rf, ok := ret.Get(0).(func(int64) *block.BlockMeta); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.BlockMeta) + r0 = ret.Get(0).(*block.BlockMeta) } } diff --git a/internal/evidence/pool.go b/internal/evidence/pool.go index 8ca97fd17..487e5d840 100644 --- a/internal/evidence/pool.go +++ b/internal/evidence/pool.go @@ -15,9 +15,10 @@ import ( clist "github.com/tendermint/tendermint/internal/libs/clist" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) const ( @@ -185,7 +186,7 @@ func (evpool *Pool) AddEvidence(ev types.Evidence) error { // the new state called. // // Votes are not verified. 
-func (evpool *Pool) ReportConflictingVotes(voteA, voteB *types.Vote) { +func (evpool *Pool) ReportConflictingVotes(voteA, voteB *consensus.Vote) { evpool.mtx.Lock() defer evpool.mtx.Unlock() evpool.consensusBuffer = append(evpool.consensusBuffer, duplicateVoteSet{ @@ -581,8 +582,8 @@ func (evpool *Pool) processConsensusBuffer(state sm.State) { } type duplicateVoteSet struct { - VoteA *types.Vote - VoteB *types.Vote + VoteA *consensus.Vote + VoteB *consensus.Vote } func bytesToEv(evBytes []byte) (types.Evidence, error) { diff --git a/internal/evidence/pool_test.go b/internal/evidence/pool_test.go index ac5f27b8e..709c396d2 100644 --- a/internal/evidence/pool_test.go +++ b/internal/evidence/pool_test.go @@ -15,11 +15,15 @@ import ( "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" sm "github.com/tendermint/tendermint/state" smmocks "github.com/tendermint/tendermint/state/mocks" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -41,7 +45,7 @@ func TestEvidencePoolBasic(t *testing.T) { valSet, privVals := factory.RandValidatorSet(1, 10) blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return( - &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}, + &block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}}, ) stateStore.On("LoadValidators", mock.AnythingOfType("int64")).Return(valSet, nil) stateStore.On("Load").Return(createState(height+1, valSet), nil) @@ -89,7 +93,7 @@ func TestEvidencePoolBasic(t *testing.T) { // Tests inbound evidence for the right time and height func TestAddExpiredEvidence(t *testing.T) { var ( - val = types.NewMockPV() + val = consensus.NewMockPV() height = int64(30) stateStore = initializeValidatorState(t, val, height) evidenceDB = dbm.NewMemDB() @@ -98,11 +102,11 @@ func TestAddExpiredEvidence(t *testing.T) { expiredHeight = int64(2) ) - blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta { + blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *block.BlockMeta { if h == height || h == expiredHeight { - return &types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}} + return &block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}} } - return &types.BlockMeta{Header: types.Header{Time: expiredEvidenceTime}} + return &block.BlockMeta{Header: metadata.Header{Time: expiredEvidenceTime}} }) pool, err := evidence.NewPool(log.TestingLogger(), evidenceDB, stateStore, blockStore) @@ -141,7 +145,7 @@ func TestReportConflictingVotes(t *testing.T) { var height int64 = 10 pool, pv := defaultTestPool(t, height) - val := types.NewValidator(pv.PrivKey.PubKey(), 10) + val := consensus.NewValidator(pv.PrivKey.PubKey(), 10) ev := types.NewMockDuplicateVoteEvidenceWithValidator(height+1, defaultEvidenceTime, pv, evidenceChainID) pool.ReportConflictingVotes(ev.VoteA, ev.VoteB) @@ -161,7 +165,7 @@ func TestReportConflictingVotes(t *testing.T) { state := pool.State() state.LastBlockHeight++ state.LastBlockTime = ev.Time() - state.LastValidators = 
types.NewValidatorSet([]*types.Validator{val}) + state.LastValidators = consensus.NewValidatorSet([]*consensus.Validator{val}) pool.Update(state, []types.Evidence{}) // should be able to retrieve evidence from pool @@ -202,7 +206,7 @@ func TestEvidencePoolUpdate(t *testing.T) { evidenceChainID, ) lastCommit := makeCommit(height, val.PrivKey.PubKey().Address()) - block := types.MakeBlock(height+1, []types.Tx{}, lastCommit, []types.Evidence{ev}) + block := block.MakeBlock(height+1, []mempool.Tx{}, lastCommit, []types.Evidence{ev}) // update state (partially) state.LastBlockHeight = height + 1 @@ -278,7 +282,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), LastBlockHeight: 110, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } stateStore := &smmocks.Store{} @@ -287,8 +291,8 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header}) - blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header}) + blockStore.On("LoadBlockMeta", height).Return(&block.BlockMeta{Header: *trusted.Header}) + blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header}) blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) @@ -327,7 +331,7 @@ func TestLightClientAttackEvidenceLifecycle(t *testing.T) { // pending evidence and continue to gossip it func TestRecoverPendingEvidence(t *testing.T) { height := int64(10) - val := types.NewMockPV() + val := consensus.NewMockPV() valAddress := val.PrivKey.PubKey().Address() evidenceDB := dbm.NewMemDB() stateStore := initializeValidatorState(t, val, height) @@ -362,12 +366,12 @@ func TestRecoverPendingEvidence(t *testing.T) { newStateStore.On("Load").Return(sm.State{ LastBlockTime: defaultEvidenceTime.Add(25 * time.Minute), LastBlockHeight: height + 15, - ConsensusParams: types.ConsensusParams{ - Block: types.BlockParams{ + ConsensusParams: consensus.ConsensusParams{ + Block: consensus.BlockParams{ MaxBytes: 22020096, MaxGas: -1, }, - Evidence: types.EvidenceParams{ + Evidence: consensus.EvidenceParams{ MaxAgeNumBlocks: 20, MaxAgeDuration: 20 * time.Minute, MaxBytes: defaultEvidenceMaxBytes, @@ -385,7 +389,7 @@ func TestRecoverPendingEvidence(t *testing.T) { require.Equal(t, goodEvidence, next.Value.(types.Evidence)) } -func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, height int64) sm.Store { +func initializeStateFromValidatorSet(t *testing.T, valSet *consensus.ValidatorSet, height int64) sm.Store { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) state := sm.State{ @@ -397,12 +401,12 @@ func initializeStateFromValidatorSet(t *testing.T, valSet *types.ValidatorSet, h NextValidators: valSet.CopyIncrementProposerPriority(1), LastValidators: valSet, LastHeightValidatorsChanged: 1, - ConsensusParams: types.ConsensusParams{ - Block: types.BlockParams{ + ConsensusParams: consensus.ConsensusParams{ + Block: consensus.BlockParams{ MaxBytes: 22020096, MaxGas: -1, }, - Evidence: types.EvidenceParams{ + Evidence: consensus.EvidenceParams{ MaxAgeNumBlocks: 20, MaxAgeDuration: 20 * time.Minute, MaxBytes: 1000, @@ -419,13 +423,13 @@ func initializeStateFromValidatorSet(t *testing.T, valSet 
*types.ValidatorSet, h return stateStore } -func initializeValidatorState(t *testing.T, privVal types.PrivValidator, height int64) sm.Store { +func initializeValidatorState(t *testing.T, privVal consensus.PrivValidator, height int64) sm.Store { pubKey, _ := privVal.GetPubKey(context.Background()) - validator := &types.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey} + validator := &consensus.Validator{Address: pubKey.Address(), VotingPower: 10, PubKey: pubKey} // create validator set and state - valSet := &types.ValidatorSet{ - Validators: []*types.Validator{validator}, + valSet := &consensus.ValidatorSet{ + Validators: []*consensus.Validator{validator}, Proposer: validator, } @@ -452,19 +456,19 @@ func initializeBlockStore(db dbm.DB, state sm.State, valAddr []byte) *store.Bloc return blockStore } -func makeCommit(height int64, valAddr []byte) *types.Commit { - commitSigs := []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagCommit, +func makeCommit(height int64, valAddr []byte) *metadata.Commit { + commitSigs := []metadata.CommitSig{{ + BlockIDFlag: metadata.BlockIDFlagCommit, ValidatorAddress: valAddr, Timestamp: defaultEvidenceTime, Signature: []byte("Signature"), }} - return types.NewCommit(height, 0, types.BlockID{}, commitSigs) + return metadata.NewCommit(height, 0, metadata.BlockID{}, commitSigs) } -func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV) { - val := types.NewMockPV() +func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, consensus.MockPV) { + val := consensus.NewMockPV() valAddress := val.PrivKey.PubKey().Address() evidenceDB := dbm.NewMemDB() stateStore := initializeValidatorState(t, val, height) @@ -477,12 +481,12 @@ func defaultTestPool(t *testing.T, height int64) (*evidence.Pool, types.MockPV) return pool, val } -func createState(height int64, valSet *types.ValidatorSet) sm.State { +func createState(height int64, valSet *consensus.ValidatorSet) sm.State { return sm.State{ ChainID: evidenceChainID, LastBlockHeight: height, LastBlockTime: defaultEvidenceTime, Validators: valSet, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } } diff --git a/internal/evidence/reactor.go b/internal/evidence/reactor.go index a454038fd..1e47f71fc 100644 --- a/internal/evidence/reactor.go +++ b/internal/evidence/reactor.go @@ -11,8 +11,9 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + types "github.com/tendermint/tendermint/pkg/evidence" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) var ( @@ -63,7 +64,7 @@ type Reactor struct { peerWG sync.WaitGroup mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer + peerRoutines map[p2ptypes.NodeID]*tmsync.Closer } // NewReactor returns a reference to a new evidence reactor, which implements the @@ -80,7 +81,7 @@ func NewReactor( evidenceCh: evidenceCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), + peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer), } r.BaseService = *service.NewBaseService(logger, "Evidence", r) @@ -291,7 +292,7 @@ func (r *Reactor) processPeerUpdates() { // that the peer has already received or may not be ready for. 
// // REF: https://github.com/tendermint/tendermint/issues/4727 -func (r *Reactor) broadcastEvidenceLoop(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastEvidenceLoop(peerID p2ptypes.NodeID, closer *tmsync.Closer) { var next *clist.CElement defer func() { diff --git a/internal/evidence/reactor_test.go b/internal/evidence/reactor_test.go index b098eb373..36d14bf60 100644 --- a/internal/evidence/reactor_test.go +++ b/internal/evidence/reactor_test.go @@ -21,9 +21,13 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) var ( @@ -35,11 +39,11 @@ var ( type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*evidence.Reactor - pools map[types.NodeID]*evidence.Pool - evidenceChannels map[types.NodeID]*p2p.Channel - peerUpdates map[types.NodeID]*p2p.PeerUpdates - peerChans map[types.NodeID]chan p2p.PeerUpdate + reactors map[p2ptypes.NodeID]*evidence.Reactor + pools map[p2ptypes.NodeID]*evidence.Pool + evidenceChannels map[p2ptypes.NodeID]*p2p.Channel + peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates + peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate nodes []*p2ptest.Node numStateStores int } @@ -56,10 +60,10 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { numStateStores: numStateStores, logger: log.TestingLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numStateStores}), - reactors: make(map[types.NodeID]*evidence.Reactor, numStateStores), - pools: make(map[types.NodeID]*evidence.Pool, numStateStores), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numStateStores), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numStateStores), + reactors: make(map[p2ptypes.NodeID]*evidence.Reactor, numStateStores), + pools: make(map[p2ptypes.NodeID]*evidence.Pool, numStateStores), + peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numStateStores), + peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numStateStores), } chDesc := p2p.ChannelDescriptor{ID: byte(evidence.EvidenceChannel)} @@ -76,9 +80,9 @@ func setup(t *testing.T, stateStores []sm.Store, chBuf uint) *reactorTestSuite { evidenceDB := dbm.NewMemDB() blockStore := &mocks.BlockStore{} state, _ := stateStores[idx].Load() - blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *types.BlockMeta { + blockStore.On("LoadBlockMeta", mock.AnythingOfType("int64")).Return(func(h int64) *block.BlockMeta { if h <= state.LastBlockHeight { - return &types.BlockMeta{Header: types.Header{Time: evidenceTime}} + return &block.BlockMeta{Header: metadata.Header{Time: evidenceTime}} } return nil }) @@ -124,7 +128,7 @@ func (rts *reactorTestSuite) start(t *testing.T) { "network does not have expected number of nodes") } -func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...types.NodeID) { +func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.EvidenceList, ids ...p2ptypes.NodeID) { t.Helper() 
fn := func(pool *evidence.Pool) { @@ -188,7 +192,7 @@ func (rts *reactorTestSuite) waitForEvidence(t *testing.T, evList types.Evidence } wg.Add(1) - go func(id types.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id) + go func(id p2ptypes.NodeID) { defer wg.Done(); fn(rts.pools[id]) }(id) } wg.Wait() } @@ -211,7 +215,7 @@ func (rts *reactorTestSuite) assertEvidenceChannelsEmpty(t *testing.T) { func createEvidenceList( t *testing.T, pool *evidence.Pool, - val types.PrivValidator, + val consensus.PrivValidator, numEvidence int, ) types.EvidenceList { t.Helper() @@ -236,7 +240,7 @@ func createEvidenceList( } func TestReactorMultiDisconnect(t *testing.T) { - val := types.NewMockPV() + val := consensus.NewMockPV() height := int64(numEvidence) + 10 stateDB1 := initializeValidatorState(t, val, height) @@ -275,7 +279,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { // create a stateDB for all test suites (nodes) stateDBs := make([]sm.Store, numPeers) - val := types.NewMockPV() + val := consensus.NewMockPV() // We need all validators saved for heights at least as high as we have // evidence for. @@ -293,7 +297,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { // primary. As a result, the primary will gossip all evidence to each secondary. primary := rts.network.RandomNode() secondaries := make([]*p2ptest.Node, 0, len(rts.network.NodeIDs())-1) - secondaryIDs := make([]types.NodeID, 0, cap(secondaries)) + secondaryIDs := make([]p2ptypes.NodeID, 0, cap(secondaries)) for id := range rts.network.Nodes { if id == primary.NodeID { continue @@ -329,7 +333,7 @@ func TestReactorBroadcastEvidence(t *testing.T) { // connected to one another but are at different heights. Reactor 1 which is // ahead receives a list of evidence. func TestReactorBroadcastEvidence_Lagging(t *testing.T) { - val := types.NewMockPV() + val := consensus.NewMockPV() height1 := int64(numEvidence) + 10 height2 := int64(numEvidence) / 2 @@ -365,7 +369,7 @@ func TestReactorBroadcastEvidence_Lagging(t *testing.T) { } func TestReactorBroadcastEvidence_Pending(t *testing.T) { - val := types.NewMockPV() + val := consensus.NewMockPV() height := int64(10) stateDB1 := initializeValidatorState(t, val, height) @@ -405,7 +409,7 @@ func TestReactorBroadcastEvidence_Pending(t *testing.T) { } func TestReactorBroadcastEvidence_Committed(t *testing.T) { - val := types.NewMockPV() + val := consensus.NewMockPV() height := int64(10) stateDB1 := initializeValidatorState(t, val, height) @@ -465,7 +469,7 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { // create a stateDB for all test suites (nodes) stateDBs := make([]sm.Store, numPeers) - val := types.NewMockPV() + val := consensus.NewMockPV() // We need all validators saved for heights at least as high as we have // evidence for. 
@@ -506,18 +510,18 @@ func TestReactorBroadcastEvidence_FullyConnected(t *testing.T) { // nolint:lll func TestEvidenceListSerialization(t *testing.T) { - exampleVote := func(msgType byte) *types.Vote { - var stamp, err = time.Parse(types.TimeFormat, "2017-12-25T03:00:01.234Z") + exampleVote := func(msgType byte) *consensus.Vote { + var stamp, err = time.Parse(metadata.TimeFormat, "2017-12-25T03:00:01.234Z") require.NoError(t, err) - return &types.Vote{ + return &consensus.Vote{ Type: tmproto.SignedMsgType(msgType), Height: 3, Round: 2, Timestamp: stamp, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1000000, Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, @@ -527,12 +531,12 @@ func TestEvidenceListSerialization(t *testing.T) { } } - val := &types.Validator{ + val := &consensus.Validator{ Address: crypto.AddressHash([]byte("validator_address")), VotingPower: 10, } - valSet := types.NewValidatorSet([]*types.Validator{val}) + valSet := consensus.NewValidatorSet([]*consensus.Validator{val}) dupl := types.NewDuplicateVoteEvidence( exampleVote(1), diff --git a/internal/evidence/services.go b/internal/evidence/services.go index 473999b21..6f699202d 100644 --- a/internal/evidence/services.go +++ b/internal/evidence/services.go @@ -1,13 +1,14 @@ package evidence import ( - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" ) //go:generate ../../scripts/mockery_generate.sh BlockStore type BlockStore interface { - LoadBlockMeta(height int64) *types.BlockMeta - LoadBlockCommit(height int64) *types.Commit + LoadBlockMeta(height int64) *block.BlockMeta + LoadBlockCommit(height int64) *metadata.Commit Height() int64 } diff --git a/internal/evidence/verify.go b/internal/evidence/verify.go index 99c8e28e8..14a3af78e 100644 --- a/internal/evidence/verify.go +++ b/internal/evidence/verify.go @@ -7,7 +7,9 @@ import ( "time" "github.com/tendermint/tendermint/light" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" ) // verify verifies the evidence fully by checking: @@ -156,8 +158,8 @@ func (evpool *Pool) verify(evidence types.Evidence) error { // // CONTRACT: must run ValidateBasic() on the evidence before verifying // must check that the evidence has not expired (i.e. is outside the maximum age threshold) -func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *types.SignedHeader, - commonVals *types.ValidatorSet, now time.Time, trustPeriod time.Duration) error { +func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, trustedHeader *metadata.SignedHeader, + commonVals *consensus.ValidatorSet, now time.Time, trustPeriod time.Duration) error { // In the case of lunatic attack there will be a different commonHeader height. 
Therefore the node perform a single // verification jump between the common header and the conflicting one if commonHeader.Height != e.ConflictingBlock.Height { @@ -199,7 +201,7 @@ func VerifyLightClientAttack(e *types.LightClientAttackEvidence, commonHeader, t // - the height, round, type and validator address of the votes must be the same // - the block ID's must be different // - The signatures must both be valid -func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *types.ValidatorSet) error { +func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet *consensus.ValidatorSet) error { _, val := valSet.GetByAddress(e.VoteA.ValidatorAddress) if val == nil { return fmt.Errorf("address %X was not a validator at height %d", e.VoteA.ValidatorAddress, e.Height()) @@ -241,17 +243,17 @@ func VerifyDuplicateVote(e *types.DuplicateVoteEvidence, chainID string, valSet va := e.VoteA.ToProto() vb := e.VoteB.ToProto() // Signatures must be valid - if !pubKey.VerifySignature(types.VoteSignBytes(chainID, va), e.VoteA.Signature) { - return fmt.Errorf("verifying VoteA: %w", types.ErrVoteInvalidSignature) + if !pubKey.VerifySignature(consensus.VoteSignBytes(chainID, va), e.VoteA.Signature) { + return fmt.Errorf("verifying VoteA: %w", consensus.ErrVoteInvalidSignature) } - if !pubKey.VerifySignature(types.VoteSignBytes(chainID, vb), e.VoteB.Signature) { - return fmt.Errorf("verifying VoteB: %w", types.ErrVoteInvalidSignature) + if !pubKey.VerifySignature(consensus.VoteSignBytes(chainID, vb), e.VoteB.Signature) { + return fmt.Errorf("verifying VoteB: %w", consensus.ErrVoteInvalidSignature) } return nil } -func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, error) { +func getSignedHeader(blockStore BlockStore, height int64) (*metadata.SignedHeader, error) { blockMeta := blockStore.LoadBlockMeta(height) if blockMeta == nil { return nil, fmt.Errorf("don't have header at height #%d", height) @@ -260,7 +262,7 @@ func getSignedHeader(blockStore BlockStore, height int64) (*types.SignedHeader, if commit == nil { return nil, fmt.Errorf("don't have commit at height #%d", height) } - return &types.SignedHeader{ + return &metadata.SignedHeader{ Header: &blockMeta.Header, Commit: commit, }, nil diff --git a/internal/evidence/verify_test.go b/internal/evidence/verify_test.go index 85f997f2a..59bd36b64 100644 --- a/internal/evidence/verify_test.go +++ b/internal/evidence/verify_test.go @@ -17,10 +17,14 @@ import ( "github.com/tendermint/tendermint/internal/evidence/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" smmocks "github.com/tendermint/tendermint/state/mocks" - "github.com/tendermint/tendermint/types" ) const ( @@ -81,14 +85,14 @@ func TestVerify_LunaticAttackAgainstState(t *testing.T) { state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), LastBlockHeight: height + 1, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } stateStore := &smmocks.Store{} stateStore.On("LoadValidators", commonHeight).Return(common.ValidatorSet, nil) 
stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header}) - blockStore.On("LoadBlockMeta", height).Return(&types.BlockMeta{Header: *trusted.Header}) + blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header}) + blockStore.On("LoadBlockMeta", height).Return(&block.BlockMeta{Header: *trusted.Header}) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", height).Return(trusted.Commit) pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore) @@ -150,7 +154,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(2 * time.Hour), LastBlockHeight: nodeHeight, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } // modify trusted light block so that it is of a height less than the conflicting one @@ -161,8 +165,8 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { stateStore.On("LoadValidators", commonHeight).Return(common.ValidatorSet, nil) stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header}) - blockStore.On("LoadBlockMeta", nodeHeight).Return(&types.BlockMeta{Header: *trusted.Header}) + blockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header}) + blockStore.On("LoadBlockMeta", nodeHeight).Return(&block.BlockMeta{Header: *trusted.Header}) blockStore.On("LoadBlockMeta", attackHeight).Return(nil) blockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) blockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit) @@ -177,8 +181,8 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { oldBlockStore := &mocks.BlockStore{} oldHeader := trusted.Header oldHeader.Time = defaultEvidenceTime - oldBlockStore.On("LoadBlockMeta", commonHeight).Return(&types.BlockMeta{Header: *common.Header}) - oldBlockStore.On("LoadBlockMeta", nodeHeight).Return(&types.BlockMeta{Header: *oldHeader}) + oldBlockStore.On("LoadBlockMeta", commonHeight).Return(&block.BlockMeta{Header: *common.Header}) + oldBlockStore.On("LoadBlockMeta", nodeHeight).Return(&block.BlockMeta{Header: *oldHeader}) oldBlockStore.On("LoadBlockMeta", attackHeight).Return(nil) oldBlockStore.On("LoadBlockCommit", commonHeight).Return(common.Commit) oldBlockStore.On("LoadBlockCommit", nodeHeight).Return(trusted.Commit) @@ -193,7 +197,7 @@ func TestVerify_ForwardLunaticAttack(t *testing.T) { func TestVerifyLightClientAttack_Equivocation(t *testing.T) { conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10) - conflictingHeader, err := factory.MakeHeader(&types.Header{ + conflictingHeader, err := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: 10, Time: defaultEvidenceTime, @@ -201,7 +205,7 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { }) require.NoError(t, err) - trustedHeader, _ := factory.MakeHeader(&types.Header{ + trustedHeader, _ := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: 10, Time: defaultEvidenceTime, @@ -215,12 +219,12 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { // we are simulating a duplicate vote attack where all the validators in the conflictingVals set // except the last validator vote twice blockID := 
factory.MakeBlockIDWithHash(conflictingHeader.Hash()) - voteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + voteSet := consensus.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) commit, err := factory.MakeCommit(blockID, 10, 1, voteSet, conflictingPrivVals[:4], defaultEvidenceTime) require.NoError(t, err) ev := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + ConflictingBlock: &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: conflictingHeader, Commit: commit, }, @@ -233,11 +237,11 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := consensus.NewVoteSet(evidenceChainID, 10, 1, tmproto.SignedMsgType(2), conflictingVals) trustedCommit, err := factory.MakeCommit(trustedBlockID, 10, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - trustedSignedHeader := &types.SignedHeader{ + trustedSignedHeader := &metadata.SignedHeader{ Header: trustedHeader, Commit: trustedCommit, } @@ -264,13 +268,13 @@ func TestVerifyLightClientAttack_Equivocation(t *testing.T) { state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), LastBlockHeight: 11, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } stateStore := &smmocks.Store{} stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil) stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore) @@ -288,7 +292,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { var height int64 = 10 conflictingVals, conflictingPrivVals := factory.RandValidatorSet(5, 10) - conflictingHeader, err := factory.MakeHeader(&types.Header{ + conflictingHeader, err := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, @@ -296,7 +300,7 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { }) require.NoError(t, err) - trustedHeader, _ := factory.MakeHeader(&types.Header{ + trustedHeader, _ := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, @@ -310,12 +314,12 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { // we are simulating an amnesia attack where all the validators in the conflictingVals set // except the last validator vote twice. However this time the commits are of different rounds. 
blockID := makeBlockID(conflictingHeader.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) + voteSet := consensus.NewVoteSet(evidenceChainID, height, 0, tmproto.SignedMsgType(2), conflictingVals) commit, err := factory.MakeCommit(blockID, height, 0, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) ev := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + ConflictingBlock: &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: conflictingHeader, Commit: commit, }, @@ -328,11 +332,11 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } trustedBlockID := makeBlockID(trustedHeader.Hash(), 1000, []byte("partshash")) - trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + trustedVoteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) - trustedSignedHeader := &types.SignedHeader{ + trustedSignedHeader := &metadata.SignedHeader{ Header: trustedHeader, Commit: trustedCommit, } @@ -350,13 +354,13 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { state := sm.State{ LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), LastBlockHeight: 11, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } stateStore := &smmocks.Store{} stateStore.On("LoadValidators", int64(10)).Return(conflictingVals, nil) stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: *trustedHeader}) + blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: *trustedHeader}) blockStore.On("LoadBlockCommit", int64(10)).Return(trustedCommit) pool, err := evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore) @@ -371,15 +375,15 @@ func TestVerifyLightClientAttack_Amnesia(t *testing.T) { } type voteData struct { - vote1 *types.Vote - vote2 *types.Vote + vote1 *consensus.Vote + vote2 *consensus.Vote valid bool } func TestVerifyDuplicateVoteEvidence(t *testing.T) { - val := types.NewMockPV() - val2 := types.NewMockPV() - valSet := types.NewValidatorSet([]*types.Validator{val.ExtractIntoValidator(1)}) + val := consensus.NewMockPV() + val2 := consensus.NewMockPV() + valSet := consensus.NewValidatorSet([]*consensus.Validator{val.ExtractIntoValidator(1)}) blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) @@ -443,13 +447,13 @@ func TestVerifyDuplicateVoteEvidence(t *testing.T) { ChainID: chainID, LastBlockTime: defaultEvidenceTime.Add(1 * time.Minute), LastBlockHeight: 11, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), } stateStore := &smmocks.Store{} stateStore.On("LoadValidators", int64(10)).Return(valSet, nil) stateStore.On("Load").Return(state, nil) blockStore := &mocks.BlockStore{} - blockStore.On("LoadBlockMeta", int64(10)).Return(&types.BlockMeta{Header: types.Header{Time: defaultEvidenceTime}}) + blockStore.On("LoadBlockMeta", int64(10)).Return(&block.BlockMeta{Header: metadata.Header{Time: defaultEvidenceTime}}) pool, err := 
evidence.NewPool(log.TestingLogger(), dbm.NewMemDB(), stateStore, blockStore) require.NoError(t, err) @@ -474,7 +478,7 @@ func makeLunaticEvidence( height, commonHeight int64, totalVals, byzVals, phantomVals int, commonTime, attackTime time.Time, -) (ev *types.LightClientAttackEvidence, trusted *types.LightBlock, common *types.LightBlock) { +) (ev *types.LightClientAttackEvidence, trusted *light.LightBlock, common *light.LightBlock) { commonValSet, commonPrivVals := factory.RandValidatorSet(totalVals, defaultVotingPower) require.Greater(t, totalVals, byzVals) @@ -490,20 +494,20 @@ func makeLunaticEvidence( conflictingPrivVals = orderPrivValsByValSet(t, conflictingVals, conflictingPrivVals) - commonHeader, err := factory.MakeHeader(&types.Header{ + commonHeader, err := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: commonHeight, Time: commonTime, }) require.NoError(t, err) - trustedHeader, err := factory.MakeHeader(&types.Header{ + trustedHeader, err := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: height, Time: defaultEvidenceTime, }) require.NoError(t, err) - conflictingHeader, err := factory.MakeHeader(&types.Header{ + conflictingHeader, err := factory.MakeHeader(&metadata.Header{ ChainID: evidenceChainID, Height: height, Time: attackTime, @@ -512,12 +516,12 @@ func makeLunaticEvidence( require.NoError(t, err) blockID := factory.MakeBlockIDWithHash(conflictingHeader.Hash()) - voteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) + voteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), conflictingVals) commit, err := factory.MakeCommit(blockID, height, 1, voteSet, conflictingPrivVals, defaultEvidenceTime) require.NoError(t, err) ev = &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + ConflictingBlock: &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: conflictingHeader, Commit: commit, }, @@ -529,21 +533,21 @@ func makeLunaticEvidence( Timestamp: commonTime, } - common = &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + common = &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: commonHeader, // we can leave this empty because we shouldn't be checking this - Commit: &types.Commit{}, + Commit: &metadata.Commit{}, }, ValidatorSet: commonValSet, } trustedBlockID := factory.MakeBlockIDWithHash(trustedHeader.Hash()) trustedVals, privVals := factory.RandValidatorSet(totalVals, defaultVotingPower) - trustedVoteSet := types.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) + trustedVoteSet := consensus.NewVoteSet(evidenceChainID, height, 1, tmproto.SignedMsgType(2), trustedVals) trustedCommit, err := factory.MakeCommit(trustedBlockID, height, 1, trustedVoteSet, privVals, defaultEvidenceTime) require.NoError(t, err) - trusted = &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + trusted = &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: trustedHeader, Commit: trustedCommit, }, @@ -553,11 +557,11 @@ func makeLunaticEvidence( } func makeVote( - t *testing.T, val types.PrivValidator, chainID string, valIndex int32, height int64, - round int32, step int, blockID types.BlockID, time time.Time) *types.Vote { + t *testing.T, val consensus.PrivValidator, chainID string, valIndex int32, height int64, + round int32, step int, blockID metadata.BlockID, time time.Time) *consensus.Vote { pubKey, err := val.GetPubKey(context.Background()) 
require.NoError(t, err) - v := &types.Vote{ + v := &consensus.Vote{ ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, @@ -576,16 +580,16 @@ func makeVote( return v } -func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) metadata.BlockID { var ( h = make([]byte, tmhash.Size) psH = make([]byte, tmhash.Size) ) copy(h, hash) copy(psH, partSetHash) - return types.BlockID{ + return metadata.BlockID{ Hash: h, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: partSetSize, Hash: psH, }, @@ -593,8 +597,8 @@ func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.Bloc } func orderPrivValsByValSet( - t *testing.T, vals *types.ValidatorSet, privVals []types.PrivValidator) []types.PrivValidator { - output := make([]types.PrivValidator, len(privVals)) + t *testing.T, vals *consensus.ValidatorSet, privVals []consensus.PrivValidator) []consensus.PrivValidator { + output := make([]consensus.PrivValidator, len(privVals)) for idx, v := range vals.Validators { for _, p := range privVals { pubKey, err := p.GetPubKey(context.Background()) diff --git a/internal/mempool/cache.go b/internal/mempool/cache.go index 43174f106..fcee2c0ec 100644 --- a/internal/mempool/cache.go +++ b/internal/mempool/cache.go @@ -4,7 +4,7 @@ import ( "container/list" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" + types "github.com/tendermint/tendermint/pkg/mempool" ) // TxCache defines an interface for raw transaction caching in a mempool. diff --git a/internal/mempool/ids.go b/internal/mempool/ids.go index 49a9ac607..fbb80da24 100644 --- a/internal/mempool/ids.go +++ b/internal/mempool/ids.go @@ -4,21 +4,21 @@ import ( "fmt" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // nolint: golint // TODO: Rename type. type MempoolIDs struct { mtx tmsync.RWMutex - peerMap map[types.NodeID]uint16 + peerMap map[p2p.NodeID]uint16 nextID uint16 // assumes that a node will never have over 65536 active peers activeIDs map[uint16]struct{} // used to check if a given peerID key is used } func NewMempoolIDs() *MempoolIDs { return &MempoolIDs{ - peerMap: make(map[types.NodeID]uint16), + peerMap: make(map[p2p.NodeID]uint16), // reserve UnknownPeerID for mempoolReactor.BroadcastTx activeIDs: map[uint16]struct{}{UnknownPeerID: {}}, @@ -28,7 +28,7 @@ func NewMempoolIDs() *MempoolIDs { // ReserveForPeer searches for the next unused ID and assigns it to the provided // peer. -func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { +func (ids *MempoolIDs) ReserveForPeer(peerID p2p.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -38,7 +38,7 @@ func (ids *MempoolIDs) ReserveForPeer(peerID types.NodeID) { } // Reclaim returns the ID reserved for the peer back to unused pool. -func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { +func (ids *MempoolIDs) Reclaim(peerID p2p.NodeID) { ids.mtx.Lock() defer ids.mtx.Unlock() @@ -50,7 +50,7 @@ func (ids *MempoolIDs) Reclaim(peerID types.NodeID) { } // GetForPeer returns an ID reserved for the peer. 
-func (ids *MempoolIDs) GetForPeer(peerID types.NodeID) uint16 { +func (ids *MempoolIDs) GetForPeer(peerID p2p.NodeID) uint16 { ids.mtx.RLock() defer ids.mtx.RUnlock() diff --git a/internal/mempool/ids_test.go b/internal/mempool/ids_test.go index a39838627..5def25766 100644 --- a/internal/mempool/ids_test.go +++ b/internal/mempool/ids_test.go @@ -4,13 +4,13 @@ import ( "testing" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func TestMempoolIDsBasic(t *testing.T) { ids := NewMempoolIDs() - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) ids.ReserveForPeer(peerID) diff --git a/internal/mempool/mempool.go b/internal/mempool/mempool.go index d679b3506..889f0509c 100644 --- a/internal/mempool/mempool.go +++ b/internal/mempool/mempool.go @@ -5,9 +5,10 @@ import ( "fmt" "math" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + types "github.com/tendermint/tendermint/pkg/mempool" ) const ( @@ -108,7 +109,7 @@ type PostCheckFunc func(types.Tx, *abci.ResponseCheckTx) error // to the expected maxBytes. func PreCheckMaxBytes(maxBytes int64) PreCheckFunc { return func(tx types.Tx) error { - txSize := types.ComputeProtoSizeForTxs([]types.Tx{tx}) + txSize := block.ComputeProtoSizeForTxs([]types.Tx{tx}) if txSize > maxBytes { return fmt.Errorf("tx size is too big: %d, max: %d", txSize, maxBytes) diff --git a/internal/mempool/mock/mempool.go b/internal/mempool/mock/mempool.go index 5749d2d3c..d48989526 100644 --- a/internal/mempool/mock/mempool.go +++ b/internal/mempool/mock/mempool.go @@ -3,10 +3,10 @@ package mock import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/libs/clist" mempl "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/mempool" ) // Mempool is an empty implementation of a Mempool, useful for testing. diff --git a/internal/mempool/tx.go b/internal/mempool/tx.go index 860d3d3b4..7aebd32bf 100644 --- a/internal/mempool/tx.go +++ b/internal/mempool/tx.go @@ -3,20 +3,21 @@ package mempool import ( "crypto/sha256" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/p2p" ) // TxKeySize defines the size of the transaction's key used for indexing. const TxKeySize = sha256.Size // TxKey is the fixed length array key used as an index. -func TxKey(tx types.Tx) [TxKeySize]byte { +func TxKey(tx mempool.Tx) [TxKeySize]byte { return sha256.Sum256(tx) } // TxHashFromBytes returns the hash of a transaction from raw bytes. func TxHashFromBytes(tx []byte) []byte { - return types.Tx(tx).Hash() + return mempool.Tx(tx).Hash() } // TxInfo are parameters that get passed when attempting to add a tx to the @@ -28,5 +29,5 @@ type TxInfo struct { SenderID uint16 // SenderNodeID is the actual types.NodeID of the sender. 
- SenderNodeID types.NodeID + SenderNodeID p2p.NodeID } diff --git a/internal/mempool/v0/cache_test.go b/internal/mempool/v0/cache_test.go index fbb719231..e37ec3270 100644 --- a/internal/mempool/v0/cache_test.go +++ b/internal/mempool/v0/cache_test.go @@ -8,10 +8,10 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/internal/mempool" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) func TestCacheAfterUpdate(t *testing.T) { diff --git a/internal/mempool/v0/clist_mempool.go b/internal/mempool/v0/clist_mempool.go index 40e93cc13..b30e11096 100644 --- a/internal/mempool/v0/clist_mempool.go +++ b/internal/mempool/v0/clist_mempool.go @@ -7,16 +7,18 @@ import ( "sync" "sync/atomic" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" pubmempool "github.com/tendermint/tendermint/pkg/mempool" + types "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) // CListMempool is an ordered in-memory pool for transactions before they are @@ -303,7 +305,7 @@ func (mem *CListMempool) globalCb(req *abci.Request, res *abci.Response) { func (mem *CListMempool) reqResCb( tx []byte, peerID uint16, - peerP2PID types.NodeID, + peerP2PID p2p.NodeID, externalCb func(*abci.Response), ) func(res *abci.Response) { return func(res *abci.Response) { @@ -382,7 +384,7 @@ func (mem *CListMempool) isFull(txSize int) error { func (mem *CListMempool) resCbFirstTime( tx []byte, peerID uint16, - peerP2PID types.NodeID, + peerP2PID p2p.NodeID, res *abci.Response, ) { switch r := res.Value.(type) { @@ -522,7 +524,7 @@ func (mem *CListMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { txs = append(txs, memTx.tx) - dataSize := types.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) + dataSize := block.ComputeProtoSizeForTxs([]types.Tx{memTx.tx}) // Check total size requirement if maxBytes > -1 && runningSize+dataSize > maxBytes { diff --git a/internal/mempool/v0/clist_mempool_test.go b/internal/mempool/v0/clist_mempool_test.go index 65a1b123e..3d6fec528 100644 --- a/internal/mempool/v0/clist_mempool_test.go +++ b/internal/mempool/v0/clist_mempool_test.go @@ -17,15 +17,15 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" abciserver "github.com/tendermint/tendermint/abci/server" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" pubmempool "github.com/tendermint/tendermint/pkg/mempool" + types "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) // A 
cleanupFunc cleans up any config / test files created for a particular diff --git a/internal/mempool/v0/reactor.go b/internal/mempool/v0/reactor.go index 29dec5833..5a37c3dff 100644 --- a/internal/mempool/v0/reactor.go +++ b/internal/mempool/v0/reactor.go @@ -15,8 +15,9 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + types "github.com/tendermint/tendermint/pkg/mempool" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" ) var ( @@ -28,7 +29,7 @@ var ( // peer information. This should eventually be replaced with a message-oriented // approach utilizing the p2p stack. type PeerManager interface { - GetHeight(types.NodeID) int64 + GetHeight(p2ptypes.NodeID) int64 } // Reactor implements a service that contains mempool of txs that are broadcasted @@ -55,7 +56,7 @@ type Reactor struct { peerWG sync.WaitGroup mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer + peerRoutines map[p2ptypes.NodeID]*tmsync.Closer } // NewReactor returns a reference to a new reactor. @@ -76,7 +77,7 @@ func NewReactor( mempoolCh: mempoolCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), + peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer), } r.BaseService = *service.NewBaseService(logger, "Mempool", r) @@ -305,7 +306,7 @@ func (r *Reactor) processPeerUpdates() { } } -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastTxRoutine(peerID p2ptypes.NodeID, closer *tmsync.Closer) { peerMempoolID := r.ids.GetForPeer(peerID) var next *clist.CElement diff --git a/internal/mempool/v0/reactor_test.go b/internal/mempool/v0/reactor_test.go index 91729b37c..e7f57e577 100644 --- a/internal/mempool/v0/reactor_test.go +++ b/internal/mempool/v0/reactor_test.go @@ -9,31 +9,32 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/mempool" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*Reactor - mempoolChnnels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*CListMempool - kvstores map[types.NodeID]*kvstore.Application + reactors map[p2ptypes.NodeID]*Reactor + mempoolChnnels map[p2ptypes.NodeID]*p2p.Channel + mempools map[p2ptypes.NodeID]*CListMempool + kvstores map[p2ptypes.NodeID]*kvstore.Application - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate + peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates - nodes []types.NodeID + nodes []p2ptypes.NodeID } func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) 
*reactorTestSuite { @@ -42,12 +43,12 @@ func setup(t *testing.T, cfg *cfg.MempoolConfig, numNodes int, chBuf uint) *reac rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChnnels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*CListMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes), + mempoolChnnels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes), + mempools: make(map[p2ptypes.NodeID]*CListMempool, numNodes), + kvstores: make(map[p2ptypes.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes), } chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} @@ -118,7 +119,7 @@ func (rts *reactorTestSuite) assertMempoolChannelsDrained(t *testing.T) { } } -func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...types.NodeID) { +func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...p2ptypes.NodeID) { t.Helper() fn := func(pool *CListMempool) { @@ -149,7 +150,7 @@ func (rts *reactorTestSuite) waitForTxns(t *testing.T, txs types.Txs, ids ...typ } wg.Add(1) - func(nid types.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) + func(nid p2ptypes.NodeID) { defer wg.Done(); fn(rts.reactors[nid].mempool) }(id) } wg.Wait() @@ -313,7 +314,7 @@ func TestDontExhaustMaxActiveIDs(t *testing.T) { nodeID := rts.nodes[0] - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2ptypes.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) // ensure the reactor does not panic (i.e. 
exhaust active IDs) @@ -357,7 +358,7 @@ func TestMempoolIDsPanicsIfNodeRequestsOvermaxActiveIDs(t *testing.T) { // 0 is already reserved for UnknownPeerID ids := mempool.NewMempoolIDs() - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2ptypes.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) for i := 0; i < mempool.MaxActiveIDs-1; i++ { diff --git a/internal/mempool/v1/mempool.go b/internal/mempool/v1/mempool.go index 850600697..8f1596b89 100644 --- a/internal/mempool/v1/mempool.go +++ b/internal/mempool/v1/mempool.go @@ -7,16 +7,17 @@ import ( "sync/atomic" "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" pubmempool "github.com/tendermint/tendermint/pkg/mempool" + types "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) var _ mempool.Mempool = (*TxMempool)(nil) @@ -356,7 +357,7 @@ func (txmp *TxMempool) ReapMaxBytesMaxGas(maxBytes, maxGas int64) types.Txs { wtx := txmp.priorityIndex.PopTx() txs = append(txs, wtx.tx) wTxs = append(wTxs, wtx) - size := types.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) + size := block.ComputeProtoSizeForTxs([]types.Tx{wtx.tx}) // Ensure we have capacity for the transaction with respect to the // transaction size. diff --git a/internal/mempool/v1/mempool_test.go b/internal/mempool/v1/mempool_test.go index df26be726..ceb16a3ca 100644 --- a/internal/mempool/v1/mempool_test.go +++ b/internal/mempool/v1/mempool_test.go @@ -17,12 +17,12 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) // application extends the KV store application by overriding CheckTx to provide diff --git a/internal/mempool/v1/reactor.go b/internal/mempool/v1/reactor.go index 3014e0519..a12fadb84 100644 --- a/internal/mempool/v1/reactor.go +++ b/internal/mempool/v1/reactor.go @@ -15,8 +15,9 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + types "github.com/tendermint/tendermint/pkg/mempool" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" ) var ( @@ -28,7 +29,7 @@ var ( // peer information. This should eventually be replaced with a message-oriented // approach utilizing the p2p stack. 
type PeerManager interface { - GetHeight(types.NodeID) int64 + GetHeight(p2ptypes.NodeID) int64 } // Reactor implements a service that contains mempool of txs that are broadcasted @@ -59,7 +60,7 @@ type Reactor struct { observePanic func(interface{}) mtx tmsync.Mutex - peerRoutines map[types.NodeID]*tmsync.Closer + peerRoutines map[p2ptypes.NodeID]*tmsync.Closer } // NewReactor returns a reference to a new reactor. @@ -80,7 +81,7 @@ func NewReactor( mempoolCh: mempoolCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), - peerRoutines: make(map[types.NodeID]*tmsync.Closer), + peerRoutines: make(map[p2ptypes.NodeID]*tmsync.Closer), observePanic: defaultObservePanic, } @@ -313,7 +314,7 @@ func (r *Reactor) processPeerUpdates() { } } -func (r *Reactor) broadcastTxRoutine(peerID types.NodeID, closer *tmsync.Closer) { +func (r *Reactor) broadcastTxRoutine(peerID p2ptypes.NodeID, closer *tmsync.Closer) { peerMempoolID := r.ids.GetForPeer(peerID) var nextGossipTx *clist.CElement diff --git a/internal/mempool/v1/reactor_test.go b/internal/mempool/v1/reactor_test.go index 5934d534c..129e6036a 100644 --- a/internal/mempool/v1/reactor_test.go +++ b/internal/mempool/v1/reactor_test.go @@ -14,23 +14,23 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protomem "github.com/tendermint/tendermint/proto/tendermint/mempool" - "github.com/tendermint/tendermint/types" ) type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*Reactor - mempoolChannels map[types.NodeID]*p2p.Channel - mempools map[types.NodeID]*TxMempool - kvstores map[types.NodeID]*kvstore.Application + reactors map[p2ptypes.NodeID]*Reactor + mempoolChannels map[p2ptypes.NodeID]*p2p.Channel + mempools map[p2ptypes.NodeID]*TxMempool + kvstores map[p2ptypes.NodeID]*kvstore.Application - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate + peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates - nodes []types.NodeID + nodes []p2ptypes.NodeID } func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { @@ -44,12 +44,12 @@ func setupReactors(t *testing.T, numNodes int, chBuf uint) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(t, p2ptest.NetworkOptions{NumNodes: numNodes}), - reactors: make(map[types.NodeID]*Reactor, numNodes), - mempoolChannels: make(map[types.NodeID]*p2p.Channel, numNodes), - mempools: make(map[types.NodeID]*TxMempool, numNodes), - kvstores: make(map[types.NodeID]*kvstore.Application, numNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, numNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, numNodes), + reactors: make(map[p2ptypes.NodeID]*Reactor, numNodes), + mempoolChannels: make(map[p2ptypes.NodeID]*p2p.Channel, numNodes), + mempools: make(map[p2ptypes.NodeID]*TxMempool, numNodes), + kvstores: make(map[p2ptypes.NodeID]*kvstore.Application, numNodes), + peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, numNodes), + peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, numNodes), } chDesc := p2p.ChannelDescriptor{ID: byte(mempool.MempoolChannel)} diff --git a/internal/mempool/v1/tx.go b/internal/mempool/v1/tx.go index 15173b91f..cfa0eaf49 100644 --- a/internal/mempool/v1/tx.go +++ 
b/internal/mempool/v1/tx.go @@ -7,7 +7,7 @@ import ( "github.com/tendermint/tendermint/internal/libs/clist" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" + types "github.com/tendermint/tendermint/pkg/mempool" ) // WrappedTx defines a wrapper around a raw transaction with additional metadata diff --git a/internal/p2p/address.go b/internal/p2p/address.go index 7c084216e..48572efa5 100644 --- a/internal/p2p/address.go +++ b/internal/p2p/address.go @@ -10,7 +10,7 @@ import ( "strconv" "strings" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) var ( @@ -31,7 +31,7 @@ var ( // If the URL is opaque, i.e. of the form "scheme:opaque", then the opaque part // is expected to contain a node ID. type NodeAddress struct { - NodeID types.NodeID + NodeID p2p.NodeID Protocol Protocol Hostname string Port uint16 @@ -58,13 +58,13 @@ func ParseNodeAddress(urlString string) (NodeAddress, error) { // Opaque URLs are expected to contain only a node ID. if url.Opaque != "" { - address.NodeID = types.NodeID(url.Opaque) + address.NodeID = p2p.NodeID(url.Opaque) return address, address.Validate() } // Otherwise, just parse a normal networked URL. if url.User != nil { - address.NodeID = types.NodeID(strings.ToLower(url.User.Username())) + address.NodeID = p2p.NodeID(strings.ToLower(url.User.Username())) } address.Hostname = strings.ToLower(url.Hostname()) diff --git a/internal/p2p/address_test.go b/internal/p2p/address_test.go index 2745faf73..abf0b35a2 100644 --- a/internal/p2p/address_test.go +++ b/internal/p2p/address_test.go @@ -9,14 +9,14 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) func TestNewNodeID(t *testing.T) { // Most tests are in TestNodeID_Validate, this just checks that it's validated. 
testcases := []struct {
input string
- expect types.NodeID
+ expect p2ptypes.NodeID
ok bool
}{
{"", "", false},
@@ -29,7 +29,7 @@ func TestNewNodeID(t *testing.T) {
for _, tc := range testcases {
tc := tc
t.Run(tc.input, func(t *testing.T) {
- id, err := types.NewNodeID(tc.input)
+ id, err := p2ptypes.NewNodeID(tc.input)
if !tc.ok {
require.Error(t, err)
} else {
@@ -42,14 +42,14 @@ func TestNewNodeID(t *testing.T) {

func TestNewNodeIDFromPubKey(t *testing.T) {
privKey := ed25519.GenPrivKeyFromSecret([]byte("foo"))
- nodeID := types.NodeIDFromPubKey(privKey.PubKey())
- require.Equal(t, types.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
+ nodeID := p2ptypes.NodeIDFromPubKey(privKey.PubKey())
+ require.Equal(t, p2ptypes.NodeID("045f5600654182cfeaccfe6cb19f0642e8a59898"), nodeID)
require.NoError(t, nodeID.Validate())
}

func TestNodeID_Bytes(t *testing.T) {
testcases := []struct {
- nodeID types.NodeID
+ nodeID p2ptypes.NodeID
expect []byte
ok bool
}{
@@ -75,7 +75,7 @@ func TestNodeID_Bytes(t *testing.T) {

func TestNodeID_Validate(t *testing.T) {
testcases := []struct {
- nodeID types.NodeID
+ nodeID p2ptypes.NodeID
ok bool
}{
{"", false},
@@ -100,7 +100,7 @@ func TestNodeID_Validate(t *testing.T) {

func TestParseNodeAddress(t *testing.T) {
user := "00112233445566778899aabbccddeeff00112233"
- id := types.NodeID(user)
+ id := p2ptypes.NodeID(user)

testcases := []struct {
url string
@@ -202,7 +202,7 @@ func TestParseNodeAddress(t *testing.T) {
}

func TestNodeAddress_Resolve(t *testing.T) {
- id := types.NodeID("00112233445566778899aabbccddeeff00112233")
+ id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")

testcases := []struct {
address p2p.NodeAddress
@@ -286,7 +286,7 @@ func TestNodeAddress_Resolve(t *testing.T) {
}

func TestNodeAddress_String(t *testing.T) {
- id := types.NodeID("00112233445566778899aabbccddeeff00112233")
+ id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")
user := string(id)
testcases := []struct {
address p2p.NodeAddress
@@ -349,7 +349,7 @@ func TestNodeAddress_String(t *testing.T) {
}

func TestNodeAddress_Validate(t *testing.T) {
- id := types.NodeID("00112233445566778899aabbccddeeff00112233")
+ id := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233")
testcases := []struct {
address p2p.NodeAddress
ok bool
diff --git a/internal/p2p/errors.go b/internal/p2p/errors.go
index 648f2cb3a..051473611 100644
--- a/internal/p2p/errors.go
+++ b/internal/p2p/errors.go
@@ -4,7 +4,7 @@ import (
"fmt"
"net"

- "github.com/tendermint/tendermint/types"
+ "github.com/tendermint/tendermint/pkg/p2p"
)

// ErrFilterTimeout indicates that a filter operation timed out.
@@ -20,7 +20,7 @@ type ErrRejected struct {
addr NetAddress
conn net.Conn
err error
- id types.NodeID
+ id p2p.NodeID
isAuthFailure bool
isDuplicate bool
isFiltered bool
@@ -101,7 +101,7 @@ func (e ErrRejected) IsSelf() bool { return e.isSelf }
// ErrSwitchDuplicatePeerID to be raised when a peer is connecting with a known
// ID.
type ErrSwitchDuplicatePeerID struct { - ID types.NodeID + ID p2p.NodeID } func (e ErrSwitchDuplicatePeerID) Error() string { @@ -129,7 +129,7 @@ func (e ErrSwitchConnectToSelf) Error() string { type ErrSwitchAuthenticationFailure struct { Dialed *NetAddress - Got types.NodeID + Got p2p.NodeID } func (e ErrSwitchAuthenticationFailure) Error() string { diff --git a/internal/p2p/mock/peer.go b/internal/p2p/mock/peer.go index cede51768..3f62bca05 100644 --- a/internal/p2p/mock/peer.go +++ b/internal/p2p/mock/peer.go @@ -6,13 +6,13 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) type Peer struct { *service.BaseService ip net.IP - id types.NodeID + id p2ptypes.NodeID addr *p2p.NetAddress kv map[string]interface{} Outbound, Persistent bool @@ -25,9 +25,9 @@ func NewPeer(ip net.IP) *Peer { if ip == nil { _, netAddr = p2p.CreateRoutableAddr() } else { - netAddr = types.NewNetAddressIPPort(ip, 26656) + netAddr = p2ptypes.NewNetAddressIPPort(ip, 26656) } - nodeKey := types.GenNodeKey() + nodeKey := p2ptypes.GenNodeKey() netAddr.ID = nodeKey.ID mp := &Peer{ ip: ip, @@ -45,14 +45,14 @@ func NewPeer(ip net.IP) *Peer { func (mp *Peer) FlushStop() { mp.Stop() } //nolint:errcheck //ignore error func (mp *Peer) TrySend(chID byte, msgBytes []byte) bool { return true } func (mp *Peer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *Peer) NodeInfo() types.NodeInfo { - return types.NodeInfo{ +func (mp *Peer) NodeInfo() p2ptypes.NodeInfo { + return p2ptypes.NodeInfo{ NodeID: mp.addr.ID, ListenAddr: mp.addr.DialString(), } } func (mp *Peer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp *Peer) ID() types.NodeID { return mp.id } +func (mp *Peer) ID() p2ptypes.NodeID { return mp.id } func (mp *Peer) IsOutbound() bool { return mp.Outbound } func (mp *Peer) IsPersistent() bool { return mp.Persistent } func (mp *Peer) Get(key string) interface{} { diff --git a/internal/p2p/mocks/connection.go b/internal/p2p/mocks/connection.go index 6c6174117..21f501dda 100644 --- a/internal/p2p/mocks/connection.go +++ b/internal/p2p/mocks/connection.go @@ -13,7 +13,7 @@ import ( p2p "github.com/tendermint/tendermint/internal/p2p" - types "github.com/tendermint/tendermint/types" + pkgp2p "github.com/tendermint/tendermint/pkg/p2p" ) // Connection is an autogenerated mock type for the Connection type @@ -50,18 +50,18 @@ func (_m *Connection) FlushClose() error { } // Handshake provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) { +func (_m *Connection) Handshake(_a0 context.Context, _a1 pkgp2p.NodeInfo, _a2 crypto.PrivKey) (pkgp2p.NodeInfo, crypto.PubKey, error) { ret := _m.Called(_a0, _a1, _a2) - var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func(context.Context, types.NodeInfo, crypto.PrivKey) types.NodeInfo); ok { + var r0 pkgp2p.NodeInfo + if rf, ok := ret.Get(0).(func(context.Context, pkgp2p.NodeInfo, crypto.PrivKey) pkgp2p.NodeInfo); ok { r0 = rf(_a0, _a1, _a2) } else { - r0 = ret.Get(0).(types.NodeInfo) + r0 = ret.Get(0).(pkgp2p.NodeInfo) } var r1 crypto.PubKey - if rf, ok := ret.Get(1).(func(context.Context, types.NodeInfo, crypto.PrivKey) crypto.PubKey); ok { + if rf, ok := ret.Get(1).(func(context.Context, pkgp2p.NodeInfo, 
crypto.PrivKey) crypto.PubKey); ok { r1 = rf(_a0, _a1, _a2) } else { if ret.Get(1) != nil { @@ -70,7 +70,7 @@ func (_m *Connection) Handshake(_a0 context.Context, _a1 types.NodeInfo, _a2 cry } var r2 error - if rf, ok := ret.Get(2).(func(context.Context, types.NodeInfo, crypto.PrivKey) error); ok { + if rf, ok := ret.Get(2).(func(context.Context, pkgp2p.NodeInfo, crypto.PrivKey) error); ok { r2 = rf(_a0, _a1, _a2) } else { r2 = ret.Error(2) diff --git a/internal/p2p/mocks/peer.go b/internal/p2p/mocks/peer.go index b905c1156..a2d99c3d5 100644 --- a/internal/p2p/mocks/peer.go +++ b/internal/p2p/mocks/peer.go @@ -10,7 +10,7 @@ import ( net "net" - types "github.com/tendermint/tendermint/types" + pkgp2p "github.com/tendermint/tendermint/pkg/p2p" ) // Peer is an autogenerated mock type for the Peer type @@ -54,14 +54,14 @@ func (_m *Peer) Get(_a0 string) interface{} { } // ID provides a mock function with given fields: -func (_m *Peer) ID() types.NodeID { +func (_m *Peer) ID() pkgp2p.NodeID { ret := _m.Called() - var r0 types.NodeID - if rf, ok := ret.Get(0).(func() types.NodeID); ok { + var r0 pkgp2p.NodeID + if rf, ok := ret.Get(0).(func() pkgp2p.NodeID); ok { r0 = rf() } else { - r0 = ret.Get(0).(types.NodeID) + r0 = ret.Get(0).(pkgp2p.NodeID) } return r0 @@ -110,14 +110,14 @@ func (_m *Peer) IsRunning() bool { } // NodeInfo provides a mock function with given fields: -func (_m *Peer) NodeInfo() types.NodeInfo { +func (_m *Peer) NodeInfo() pkgp2p.NodeInfo { ret := _m.Called() - var r0 types.NodeInfo - if rf, ok := ret.Get(0).(func() types.NodeInfo); ok { + var r0 pkgp2p.NodeInfo + if rf, ok := ret.Get(0).(func() pkgp2p.NodeInfo); ok { r0 = rf() } else { - r0 = ret.Get(0).(types.NodeInfo) + r0 = ret.Get(0).(pkgp2p.NodeInfo) } return r0 @@ -243,15 +243,15 @@ func (_m *Peer) SetLogger(_a0 log.Logger) { } // SocketAddr provides a mock function with given fields: -func (_m *Peer) SocketAddr() *types.NetAddress { +func (_m *Peer) SocketAddr() *pkgp2p.NetAddress { ret := _m.Called() - var r0 *types.NetAddress - if rf, ok := ret.Get(0).(func() *types.NetAddress); ok { + var r0 *pkgp2p.NetAddress + if rf, ok := ret.Get(0).(func() *pkgp2p.NetAddress); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.NetAddress) + r0 = ret.Get(0).(*pkgp2p.NetAddress) } } diff --git a/internal/p2p/netaddress.go b/internal/p2p/netaddress.go index 6fce3a769..6b360d646 100644 --- a/internal/p2p/netaddress.go +++ b/internal/p2p/netaddress.go @@ -5,7 +5,7 @@ package p2p import ( - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) -type NetAddress = types.NetAddress +type NetAddress = p2p.NetAddress diff --git a/internal/p2p/p2p_test.go b/internal/p2p/p2p_test.go index 6e524d492..5d1482978 100644 --- a/internal/p2p/p2p_test.go +++ b/internal/p2p/p2p_test.go @@ -6,7 +6,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // Common setup for P2P tests. 
@@ -23,8 +23,8 @@ var ( } selfKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd}) - selfID = types.NodeIDFromPubKey(selfKey.PubKey()) - selfInfo = types.NodeInfo{ + selfID = p2ptypes.NodeIDFromPubKey(selfKey.PubKey()) + selfInfo = p2ptypes.NodeInfo{ NodeID: selfID, ListenAddr: "0.0.0.0:0", Network: "test", @@ -33,8 +33,8 @@ var ( } peerKey crypto.PrivKey = ed25519.GenPrivKeyFromSecret([]byte{0x84, 0xd7, 0x01, 0xbf, 0x83, 0x20, 0x1c, 0xfe}) - peerID = types.NodeIDFromPubKey(peerKey.PubKey()) - peerInfo = types.NodeInfo{ + peerID = p2ptypes.NodeIDFromPubKey(peerKey.PubKey()) + peerInfo = p2ptypes.NodeInfo{ NodeID: peerID, ListenAddr: "0.0.0.0:0", Network: "test", diff --git a/internal/p2p/p2ptest/network.go b/internal/p2p/p2ptest/network.go index 1daba3f14..f975012af 100644 --- a/internal/p2p/p2ptest/network.go +++ b/internal/p2p/p2ptest/network.go @@ -14,14 +14,14 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // Network sets up an in-memory network that can be used for high-level P2P // testing. It creates an arbitrary number of nodes that are connected to each // other, and can open channels across all nodes with custom reactors. type Network struct { - Nodes map[types.NodeID]*Node + Nodes map[p2ptypes.NodeID]*Node logger log.Logger memoryNetwork *p2p.MemoryNetwork @@ -52,7 +52,7 @@ func MakeNetwork(t *testing.T, opts NetworkOptions) *Network { opts.setDefaults() logger := log.TestingLogger() network := &Network{ - Nodes: map[types.NodeID]*Node{}, + Nodes: map[p2ptypes.NodeID]*Node{}, logger: logger, memoryNetwork: p2p.NewMemoryNetwork(logger, opts.BufferSize), } @@ -72,7 +72,7 @@ func (n *Network) Start(t *testing.T) { // Set up a list of node addresses to dial, and a peer update subscription // for each node. dialQueue := []p2p.NodeAddress{} - subs := map[types.NodeID]*p2p.PeerUpdates{} + subs := map[p2ptypes.NodeID]*p2p.PeerUpdates{} for _, node := range n.Nodes { dialQueue = append(dialQueue, node.NodeAddress) subs[node.NodeID] = node.PeerManager.Subscribe() @@ -125,8 +125,8 @@ func (n *Network) Start(t *testing.T) { } // NodeIDs returns the network's node IDs. -func (n *Network) NodeIDs() []types.NodeID { - ids := []types.NodeID{} +func (n *Network) NodeIDs() []p2ptypes.NodeID { + ids := []p2ptypes.NodeID{} for id := range n.Nodes { ids = append(ids, id) } @@ -140,8 +140,8 @@ func (n *Network) MakeChannels( chDesc p2p.ChannelDescriptor, messageType proto.Message, size int, -) map[types.NodeID]*p2p.Channel { - channels := map[types.NodeID]*p2p.Channel{} +) map[p2ptypes.NodeID]*p2p.Channel { + channels := map[p2ptypes.NodeID]*p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannel(t, chDesc, messageType, size) } @@ -156,8 +156,8 @@ func (n *Network) MakeChannelsNoCleanup( chDesc p2p.ChannelDescriptor, messageType proto.Message, size int, -) map[types.NodeID]*p2p.Channel { - channels := map[types.NodeID]*p2p.Channel{} +) map[p2ptypes.NodeID]*p2p.Channel { + channels := map[p2ptypes.NodeID]*p2p.Channel{} for _, node := range n.Nodes { channels[node.NodeID] = node.MakeChannelNoCleanup(t, chDesc, messageType, size) } @@ -174,7 +174,7 @@ func (n *Network) RandomNode() *Node { } // Peers returns a node's peers (i.e. everyone except itself). 
-func (n *Network) Peers(id types.NodeID) []*Node { +func (n *Network) Peers(id p2ptypes.NodeID) []*Node { peers := make([]*Node, 0, len(n.Nodes)-1) for _, peer := range n.Nodes { if peer.NodeID != id { @@ -186,7 +186,7 @@ func (n *Network) Peers(id types.NodeID) []*Node { // Remove removes a node from the network, stopping it and waiting for all other // nodes to pick up the disconnection. -func (n *Network) Remove(t *testing.T, id types.NodeID) { +func (n *Network) Remove(t *testing.T, id p2ptypes.NodeID) { require.Contains(t, n.Nodes, id) node := n.Nodes[id] delete(n.Nodes, id) @@ -214,8 +214,8 @@ func (n *Network) Remove(t *testing.T, id types.NodeID) { // Node is a node in a Network, with a Router and a PeerManager. type Node struct { - NodeID types.NodeID - NodeInfo types.NodeInfo + NodeID p2ptypes.NodeID + NodeInfo p2ptypes.NodeInfo NodeAddress p2p.NodeAddress PrivKey crypto.PrivKey Router *p2p.Router @@ -228,8 +228,8 @@ type Node struct { // network. Callers are responsible for updating peering relationships. func (n *Network) MakeNode(t *testing.T, opts NodeOptions) *Node { privKey := ed25519.GenPrivKey() - nodeID := types.NodeIDFromPubKey(privKey.PubKey()) - nodeInfo := types.NodeInfo{ + nodeID := p2ptypes.NodeIDFromPubKey(privKey.PubKey()) + nodeInfo := p2ptypes.NodeInfo{ NodeID: nodeID, ListenAddr: "0.0.0.0:0", // FIXME: We have to fake this for now. Moniker: string(nodeID), diff --git a/internal/p2p/p2ptest/require.go b/internal/p2p/p2ptest/require.go index 3598baba0..5e4c3f571 100644 --- a/internal/p2p/p2ptest/require.go +++ b/internal/p2p/p2ptest/require.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // RequireEmpty requires that the given channel is empty. @@ -84,7 +84,7 @@ func RequireSend(t *testing.T, channel *p2p.Channel, envelope p2p.Envelope) { func RequireSendReceive( t *testing.T, channel *p2p.Channel, - peerID types.NodeID, + peerID p2ptypes.NodeID, send proto.Message, receive proto.Message, ) { diff --git a/internal/p2p/p2ptest/util.go b/internal/p2p/p2ptest/util.go index 544e937bb..b7bf1e897 100644 --- a/internal/p2p/p2ptest/util.go +++ b/internal/p2p/p2ptest/util.go @@ -2,13 +2,13 @@ package p2ptest import ( gogotypes "github.com/gogo/protobuf/types" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // Message is a simple message containing a string-typed Value field. 
type Message = gogotypes.StringValue -func NodeInSlice(id types.NodeID, ids []types.NodeID) bool { +func NodeInSlice(id p2ptypes.NodeID, ids []p2ptypes.NodeID) bool { for _, n := range ids { if id == n { return true diff --git a/internal/p2p/peer.go b/internal/p2p/peer.go index 709a1294a..1591725fa 100644 --- a/internal/p2p/peer.go +++ b/internal/p2p/peer.go @@ -11,7 +11,7 @@ import ( "github.com/tendermint/tendermint/libs/cmap" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) //go:generate ../../scripts/mockery_generate.sh Peer @@ -23,7 +23,7 @@ type Peer interface { service.Service FlushStop() - ID() types.NodeID // peer's cryptographic ID + ID() p2p.NodeID // peer's cryptographic ID RemoteIP() net.IP // remote IP of the connection RemoteAddr() net.Addr // remote address of the connection @@ -32,7 +32,7 @@ type Peer interface { CloseConn() error // close original connection - NodeInfo() types.NodeInfo // peer's info + NodeInfo() p2p.NodeInfo // peer's info Status() tmconn.ConnectionStatus SocketAddr() *NetAddress // actual address of the socket @@ -81,7 +81,7 @@ type peer struct { // peer's node info and the channel it knows about // channels = nodeInfo.Channels // cached to avoid copying nodeInfo in hasChannel - nodeInfo types.NodeInfo + nodeInfo p2p.NodeInfo channels []byte reactors map[byte]Reactor onPeerError func(Peer, interface{}) @@ -96,7 +96,7 @@ type peer struct { type PeerOption func(*peer) func newPeer( - nodeInfo types.NodeInfo, + nodeInfo p2p.NodeInfo, pc peerConn, reactorsByCh map[byte]Reactor, onPeerError func(Peer, interface{}), @@ -203,7 +203,7 @@ func (p *peer) OnStop() { // Implements Peer // ID returns the peer's ID - the hex encoded hash of its pubkey. -func (p *peer) ID() types.NodeID { +func (p *peer) ID() p2p.NodeID { return p.nodeInfo.ID() } @@ -218,7 +218,7 @@ func (p *peer) IsPersistent() bool { } // NodeInfo returns a copy of the peer's NodeInfo. -func (p *peer) NodeInfo() types.NodeInfo { +func (p *peer) NodeInfo() p2p.NodeInfo { return p.nodeInfo } diff --git a/internal/p2p/peer_set.go b/internal/p2p/peer_set.go index 8d4ad4939..29f74820c 100644 --- a/internal/p2p/peer_set.go +++ b/internal/p2p/peer_set.go @@ -4,14 +4,14 @@ import ( "net" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // IPeerSet has a (immutable) subset of the methods of PeerSet. type IPeerSet interface { - Has(key types.NodeID) bool + Has(key p2p.NodeID) bool HasIP(ip net.IP) bool - Get(key types.NodeID) Peer + Get(key p2p.NodeID) Peer List() []Peer Size() int } @@ -22,7 +22,7 @@ type IPeerSet interface { // Iteration over the peers is super fast and thread-safe. type PeerSet struct { mtx tmsync.Mutex - lookup map[types.NodeID]*peerSetItem + lookup map[p2p.NodeID]*peerSetItem list []Peer } @@ -34,7 +34,7 @@ type peerSetItem struct { // NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. func NewPeerSet() *PeerSet { return &PeerSet{ - lookup: make(map[types.NodeID]*peerSetItem), + lookup: make(map[p2p.NodeID]*peerSetItem), list: make([]Peer, 0, 256), } } @@ -59,7 +59,7 @@ func (ps *PeerSet) Add(peer Peer) error { // Has returns true if the set contains the peer referred to by this // peerKey, otherwise false. 
-func (ps *PeerSet) Has(peerKey types.NodeID) bool { +func (ps *PeerSet) Has(peerKey p2p.NodeID) bool { ps.mtx.Lock() _, ok := ps.lookup[peerKey] ps.mtx.Unlock() @@ -89,7 +89,7 @@ func (ps *PeerSet) hasIP(peerIP net.IP) bool { // Get looks up a peer by the provided peerKey. Returns nil if peer is not // found. -func (ps *PeerSet) Get(peerKey types.NodeID) Peer { +func (ps *PeerSet) Get(peerKey p2p.NodeID) Peer { ps.mtx.Lock() defer ps.mtx.Unlock() item, ok := ps.lookup[peerKey] diff --git a/internal/p2p/peer_set_test.go b/internal/p2p/peer_set_test.go index 3e2397d2d..aef40f54c 100644 --- a/internal/p2p/peer_set_test.go +++ b/internal/p2p/peer_set_test.go @@ -8,22 +8,22 @@ import ( "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // mockPeer for testing the PeerSet type mockPeer struct { service.BaseService ip net.IP - id types.NodeID + id p2p.NodeID } func (mp *mockPeer) FlushStop() { mp.Stop() } //nolint:errcheck // ignore error func (mp *mockPeer) TrySend(chID byte, msgBytes []byte) bool { return true } func (mp *mockPeer) Send(chID byte, msgBytes []byte) bool { return true } -func (mp *mockPeer) NodeInfo() types.NodeInfo { return types.NodeInfo{} } +func (mp *mockPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } func (mp *mockPeer) Status() ConnectionStatus { return ConnectionStatus{} } -func (mp *mockPeer) ID() types.NodeID { return mp.id } +func (mp *mockPeer) ID() p2p.NodeID { return mp.id } func (mp *mockPeer) IsOutbound() bool { return false } func (mp *mockPeer) IsPersistent() bool { return true } func (mp *mockPeer) Get(s string) interface{} { return s } @@ -38,7 +38,7 @@ func newMockPeer(ip net.IP) *mockPeer { if ip == nil { ip = net.IP{127, 0, 0, 1} } - nodeKey := types.GenNodeKey() + nodeKey := p2p.GenNodeKey() return &mockPeer{ ip: ip, id: nodeKey.ID, diff --git a/internal/p2p/peer_test.go b/internal/p2p/peer_test.go index dfe7bc798..009fd4f47 100644 --- a/internal/p2p/peer_test.go +++ b/internal/p2p/peer_test.go @@ -15,7 +15,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/config" tmconn "github.com/tendermint/tendermint/internal/p2p/conn" @@ -83,7 +83,7 @@ func createOutboundPeerAndPerformHandshake( {ID: testCh, Priority: 1}, } pk := ed25519.GenPrivKey() - ourNodeInfo := testNodeInfo(types.NodeIDFromPubKey(pk.PubKey()), "host_peer") + ourNodeInfo := testNodeInfo(p2p.NodeIDFromPubKey(pk.PubKey()), "host_peer") transport := NewMConnTransport(log.TestingLogger(), mConfig, chDescs, MConnTransportOptions{}) reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} pc, err := testOutboundPeerConn(transport, addr, config, false, pk) @@ -151,8 +151,8 @@ func (rp *remotePeer) Addr() *NetAddress { return rp.addr } -func (rp *remotePeer) ID() types.NodeID { - return types.NodeIDFromPubKey(rp.PrivKey.PubKey()) +func (rp *remotePeer) ID() p2p.NodeID { + return p2p.NodeIDFromPubKey(rp.PrivKey.PubKey()) } func (rp *remotePeer) Start() { @@ -165,7 +165,7 @@ func (rp *remotePeer) Start() { golog.Fatalf("net.Listen tcp :0: %+v", e) } rp.listener = l - rp.addr = types.NewNetAddress(types.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) + rp.addr = p2p.NewNetAddress(p2p.NodeIDFromPubKey(rp.PrivKey.PubKey()), l.Addr()) if 
rp.channels == nil { rp.channels = []byte{testCh} } @@ -222,8 +222,8 @@ func (rp *remotePeer) accept() { } } -func (rp *remotePeer) nodeInfo() types.NodeInfo { - ni := types.NodeInfo{ +func (rp *remotePeer) nodeInfo() p2p.NodeInfo { + ni := p2p.NodeInfo{ ProtocolVersion: defaultProtocolVersion, NodeID: rp.Addr().ID, ListenAddr: rp.listener.Addr().String(), diff --git a/internal/p2p/peermanager.go b/internal/p2p/peermanager.go index 1e9afb38b..7f6950405 100644 --- a/internal/p2p/peermanager.go +++ b/internal/p2p/peermanager.go @@ -15,8 +15,8 @@ import ( dbm "github.com/tendermint/tm-db" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/pkg/p2p" p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) const ( @@ -47,7 +47,7 @@ const ( // PeerUpdate is a peer update event sent via PeerUpdates. type PeerUpdate struct { - NodeID types.NodeID + NodeID p2p.NodeID Status PeerStatus } @@ -106,7 +106,7 @@ type PeerManagerOptions struct { // to. These will be scored higher than other peers, and if // MaxConnectedUpgrade is non-zero any lower-scored peers will be evicted if // necessary to make room for these. - PersistentPeers []types.NodeID + PersistentPeers []p2p.NodeID // MaxPeers is the maximum number of peers to track information about, i.e. // store in the peer store. When exceeded, the lowest-scored unconnected peers @@ -148,15 +148,15 @@ type PeerManagerOptions struct { // PeerScores sets fixed scores for specific peers. It is mainly used // for testing. A score of 0 is ignored. - PeerScores map[types.NodeID]PeerScore + PeerScores map[p2p.NodeID]PeerScore // PrivatePeerIDs defines a set of NodeID objects which the PEX reactor will // consider private and never gossip. - PrivatePeers map[types.NodeID]struct{} + PrivatePeers map[p2p.NodeID]struct{} // persistentPeers provides fast PersistentPeers lookups. It is built // by optimize(). - persistentPeers map[types.NodeID]bool + persistentPeers map[p2p.NodeID]bool } // Validate validates the options. @@ -210,7 +210,7 @@ func (o *PeerManagerOptions) Validate() error { // isPersistentPeer checks if a peer is in PersistentPeers. It will panic // if called before optimize(). -func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool { +func (o *PeerManagerOptions) isPersistent(id p2p.NodeID) bool { if o.persistentPeers == nil { panic("isPersistentPeer() called before optimize()") } @@ -221,7 +221,7 @@ func (o *PeerManagerOptions) isPersistent(id types.NodeID) bool { // separate method instead of memoizing during calls to avoid dealing with // concurrency and mutex overhead. func (o *PeerManagerOptions) optimize() { - o.persistentPeers = make(map[types.NodeID]bool, len(o.PersistentPeers)) + o.persistentPeers = make(map[p2p.NodeID]bool, len(o.PersistentPeers)) for _, p := range o.PersistentPeers { o.persistentPeers[p] = true } @@ -271,7 +271,7 @@ func (o *PeerManagerOptions) optimize() { // - EvictNext: pick peer from evict, mark as evicting. // - Disconnected: unmark connected, upgrading[from]=to, evict, evicting. 
type PeerManager struct { - selfID types.NodeID + selfID p2p.NodeID options PeerManagerOptions rand *rand.Rand dialWaker *tmsync.Waker // wakes up DialNext() on relevant peer changes @@ -282,16 +282,16 @@ type PeerManager struct { mtx sync.Mutex store *peerStore subscriptions map[*PeerUpdates]*PeerUpdates // keyed by struct identity (address) - dialing map[types.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) - upgrading map[types.NodeID]types.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) - connected map[types.NodeID]bool // connected peers (Dialed/Accepted → Disconnected) - ready map[types.NodeID]bool // ready peers (Ready → Disconnected) - evict map[types.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) - evicting map[types.NodeID]bool // peers being evicted (EvictNext → Disconnected) + dialing map[p2p.NodeID]bool // peers being dialed (DialNext → Dialed/DialFail) + upgrading map[p2p.NodeID]p2p.NodeID // peers claimed for upgrade (DialNext → Dialed/DialFail) + connected map[p2p.NodeID]bool // connected peers (Dialed/Accepted → Disconnected) + ready map[p2p.NodeID]bool // ready peers (Ready → Disconnected) + evict map[p2p.NodeID]bool // peers scheduled for eviction (Connected → EvictNext) + evicting map[p2p.NodeID]bool // peers being evicted (EvictNext → Disconnected) } // NewPeerManager creates a new peer manager. -func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) { +func NewPeerManager(selfID p2p.NodeID, peerDB dbm.DB, options PeerManagerOptions) (*PeerManager, error) { if selfID == "" { return nil, errors.New("self ID not given") } @@ -315,12 +315,12 @@ func NewPeerManager(selfID types.NodeID, peerDB dbm.DB, options PeerManagerOptio closeCh: make(chan struct{}), store: store, - dialing: map[types.NodeID]bool{}, - upgrading: map[types.NodeID]types.NodeID{}, - connected: map[types.NodeID]bool{}, - ready: map[types.NodeID]bool{}, - evict: map[types.NodeID]bool{}, - evicting: map[types.NodeID]bool{}, + dialing: map[p2p.NodeID]bool{}, + upgrading: map[p2p.NodeID]p2p.NodeID{}, + connected: map[p2p.NodeID]bool{}, + ready: map[p2p.NodeID]bool{}, + evict: map[p2p.NodeID]bool{}, + evicting: map[p2p.NodeID]bool{}, subscriptions: map[*PeerUpdates]*PeerUpdates{}, } if err = peerManager.configurePeers(); err != nil { @@ -340,7 +340,7 @@ func (m *PeerManager) configurePeers() error { return err } - configure := map[types.NodeID]bool{} + configure := map[p2p.NodeID]bool{} for _, id := range m.options.PersistentPeers { configure[id] = true } @@ -365,7 +365,7 @@ func (m *PeerManager) configurePeer(peer peerInfo) peerInfo { } // newPeerInfo creates a peerInfo for a new peer. -func (m *PeerManager) newPeerInfo(id types.NodeID) peerInfo { +func (m *PeerManager) newPeerInfo(id p2p.NodeID) peerInfo { peerInfo := peerInfo{ ID: id, AddressInfo: map[NodeAddress]*peerAddressInfo{}, @@ -569,7 +569,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { delete(m.dialing, address.NodeID) - var upgradeFromPeer types.NodeID + var upgradeFromPeer p2p.NodeID for from, to := range m.upgrading { if to == address.NodeID { delete(m.upgrading, from) @@ -640,7 +640,7 @@ func (m *PeerManager) Dialed(address NodeAddress) error { // that, we'll need to get the remote address after all, but as noted above that // can't be the remote endpoint since that will usually have the wrong port // number. 
-func (m *PeerManager) Accepted(peerID types.NodeID) error { +func (m *PeerManager) Accepted(peerID p2p.NodeID) error { m.mtx.Lock() defer m.mtx.Unlock() @@ -663,7 +663,7 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { // If all connections slots are full, but we allow upgrades (and we checked // above that we have upgrade capacity), then we can look for a lower-scored // peer to replace and if found accept the connection anyway and evict it. - var upgradeFromPeer types.NodeID + var upgradeFromPeer p2p.NodeID if m.options.MaxConnected > 0 && len(m.connected) >= int(m.options.MaxConnected) { upgradeFromPeer = m.findUpgradeCandidate(peer.ID, peer.Score()) if upgradeFromPeer == "" { @@ -688,7 +688,7 @@ func (m *PeerManager) Accepted(peerID types.NodeID) error { // peer must already be marked as connected. This is separate from Dialed() and // Accepted() to allow the router to set up its internal queues before reactors // start sending messages. -func (m *PeerManager) Ready(peerID types.NodeID) { +func (m *PeerManager) Ready(peerID p2p.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() @@ -703,7 +703,7 @@ func (m *PeerManager) Ready(peerID types.NodeID) { // EvictNext returns the next peer to evict (i.e. disconnect). If no evictable // peers are found, the call will block until one becomes available. -func (m *PeerManager) EvictNext(ctx context.Context) (types.NodeID, error) { +func (m *PeerManager) EvictNext(ctx context.Context) (p2p.NodeID, error) { for { id, err := m.TryEvictNext() if err != nil || id != "" { @@ -719,7 +719,7 @@ func (m *PeerManager) EvictNext(ctx context.Context) (types.NodeID, error) { // TryEvictNext is equivalent to EvictNext, but immediately returns an empty // node ID if no evictable peers are found. -func (m *PeerManager) TryEvictNext() (types.NodeID, error) { +func (m *PeerManager) TryEvictNext() (p2p.NodeID, error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -755,7 +755,7 @@ func (m *PeerManager) TryEvictNext() (types.NodeID, error) { // Disconnected unmarks a peer as connected, allowing it to be dialed or // accepted again as appropriate. -func (m *PeerManager) Disconnected(peerID types.NodeID) { +func (m *PeerManager) Disconnected(peerID p2p.NodeID) { m.mtx.Lock() defer m.mtx.Unlock() @@ -785,7 +785,7 @@ func (m *PeerManager) Disconnected(peerID types.NodeID) { // // FIXME: This will cause the peer manager to immediately try to reconnect to // the peer, which is probably not always what we want. -func (m *PeerManager) Errored(peerID types.NodeID, err error) { +func (m *PeerManager) Errored(peerID p2p.NodeID, err error) { m.mtx.Lock() defer m.mtx.Unlock() @@ -800,7 +800,7 @@ func (m *PeerManager) Errored(peerID types.NodeID, err error) { // // FIXME: This is fairly naïve and only returns the addresses of the // highest-ranked peers. -func (m *PeerManager) Advertise(peerID types.NodeID, limit uint16) []NodeAddress { +func (m *PeerManager) Advertise(peerID p2p.NodeID, limit uint16) []NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() @@ -925,7 +925,7 @@ func (m *PeerManager) Close() { // Addresses returns all known addresses for a peer, primarily for testing. // The order is arbitrary. -func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress { +func (m *PeerManager) Addresses(peerID p2p.NodeID) []NodeAddress { m.mtx.Lock() defer m.mtx.Unlock() @@ -939,11 +939,11 @@ func (m *PeerManager) Addresses(peerID types.NodeID) []NodeAddress { } // Peers returns all known peers, primarily for testing. The order is arbitrary. 
-func (m *PeerManager) Peers() []types.NodeID { +func (m *PeerManager) Peers() []p2p.NodeID { m.mtx.Lock() defer m.mtx.Unlock() - peers := []types.NodeID{} + peers := []p2p.NodeID{} for _, peer := range m.store.Ranked() { peers = append(peers, peer.ID) } @@ -951,11 +951,11 @@ func (m *PeerManager) Peers() []types.NodeID { } // Scores returns the peer scores for all known peers, primarily for testing. -func (m *PeerManager) Scores() map[types.NodeID]PeerScore { +func (m *PeerManager) Scores() map[p2p.NodeID]PeerScore { m.mtx.Lock() defer m.mtx.Unlock() - scores := map[types.NodeID]PeerScore{} + scores := map[p2p.NodeID]PeerScore{} for _, peer := range m.store.Ranked() { scores[peer.ID] = peer.Score() } @@ -963,7 +963,7 @@ func (m *PeerManager) Scores() map[types.NodeID]PeerScore { } // Status returns the status for a peer, primarily for testing. -func (m *PeerManager) Status(id types.NodeID) PeerStatus { +func (m *PeerManager) Status(id p2p.NodeID) PeerStatus { m.mtx.Lock() defer m.mtx.Unlock() switch { @@ -978,7 +978,7 @@ func (m *PeerManager) Status(id types.NodeID) PeerStatus { // to make room for the given peer. Returns an empty ID if none is found. // If the peer is already being upgraded to, we return that same upgrade. // The caller must hold the mutex lock. -func (m *PeerManager) findUpgradeCandidate(id types.NodeID, score PeerScore) types.NodeID { +func (m *PeerManager) findUpgradeCandidate(id p2p.NodeID, score PeerScore) p2p.NodeID { for from, to := range m.upgrading { if to == id { return from @@ -1034,7 +1034,7 @@ func (m *PeerManager) retryDelay(failures uint32, persistent bool) time.Duration // FIXME: This is a temporary workaround to share state between the consensus // and mempool reactors, carried over from the legacy P2P stack. Reactors should // not have dependencies on each other, instead tracking this themselves. -func (m *PeerManager) GetHeight(peerID types.NodeID) int64 { +func (m *PeerManager) GetHeight(peerID p2p.NodeID) int64 { m.mtx.Lock() defer m.mtx.Unlock() @@ -1047,7 +1047,7 @@ func (m *PeerManager) GetHeight(peerID types.NodeID) int64 { // FIXME: This is a temporary workaround to share state between the consensus // and mempool reactors, carried over from the legacy P2P stack. Reactors should // not have dependencies on each other, instead tracking this themselves. -func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error { +func (m *PeerManager) SetHeight(peerID p2p.NodeID, height int64) error { m.mtx.Lock() defer m.mtx.Unlock() @@ -1068,7 +1068,7 @@ func (m *PeerManager) SetHeight(peerID types.NodeID, height int64) error { // (without fsync, since we can afford to lose recent writes). type peerStore struct { db dbm.DB - peers map[types.NodeID]*peerInfo + peers map[p2p.NodeID]*peerInfo ranked []*peerInfo // cache for Ranked(), nil invalidates cache } @@ -1087,7 +1087,7 @@ func newPeerStore(db dbm.DB) (*peerStore, error) { // loadPeers loads all peers from the database into memory. func (s *peerStore) loadPeers() error { - peers := map[types.NodeID]*peerInfo{} + peers := map[p2p.NodeID]*peerInfo{} start, end := keyPeerInfoRange() iter, err := s.db.Iterator(start, end) @@ -1118,7 +1118,7 @@ func (s *peerStore) loadPeers() error { // Get fetches a peer. The boolean indicates whether the peer existed or not. // The returned peer info is a copy, and can be mutated at will. 
-func (s *peerStore) Get(id types.NodeID) (peerInfo, bool) { +func (s *peerStore) Get(id p2p.NodeID) (peerInfo, bool) { peer, ok := s.peers[id] return peer.Copy(), ok } @@ -1156,7 +1156,7 @@ func (s *peerStore) Set(peer peerInfo) error { } // Delete deletes a peer, or does nothing if it does not exist. -func (s *peerStore) Delete(id types.NodeID) error { +func (s *peerStore) Delete(id p2p.NodeID) error { if _, ok := s.peers[id]; !ok { return nil } @@ -1214,7 +1214,7 @@ func (s *peerStore) Size() int { // peerInfo contains peer information stored in a peerStore. type peerInfo struct { - ID types.NodeID + ID p2p.NodeID AddressInfo map[NodeAddress]*peerAddressInfo LastConnected time.Time @@ -1230,7 +1230,7 @@ type peerInfo struct { // erroring if the data is invalid. func peerInfoFromProto(msg *p2pproto.PeerInfo) (*peerInfo, error) { p := &peerInfo{ - ID: types.NodeID(msg.ID), + ID: p2p.NodeID(msg.ID), AddressInfo: map[NodeAddress]*peerAddressInfo{}, } if msg.LastConnected != nil { @@ -1367,7 +1367,7 @@ const ( ) // keyPeerInfo generates a peerInfo database key. -func keyPeerInfo(id types.NodeID) []byte { +func keyPeerInfo(id p2p.NodeID) []byte { key, err := orderedcode.Append(nil, prefixPeerInfo, string(id)) if err != nil { panic(err) diff --git a/internal/p2p/peermanager_scoring_test.go b/internal/p2p/peermanager_scoring_test.go index 0825af948..7eaf845fc 100644 --- a/internal/p2p/peermanager_scoring_test.go +++ b/internal/p2p/peermanager_scoring_test.go @@ -7,14 +7,14 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/ed25519" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" dbm "github.com/tendermint/tm-db" ) func TestPeerScoring(t *testing.T) { // coppied from p2p_test shared variables selfKey := ed25519.GenPrivKeyFromSecret([]byte{0xf9, 0x1b, 0x08, 0xaa, 0x38, 0xee, 0x34, 0xdd}) - selfID := types.NodeIDFromPubKey(selfKey.PubKey()) + selfID := p2p.NodeIDFromPubKey(selfKey.PubKey()) // create a mock peer manager db := dbm.NewMemDB() @@ -23,7 +23,7 @@ func TestPeerScoring(t *testing.T) { defer peerManager.Close() // create a fake node - id := types.NodeID(strings.Repeat("a1", 20)) + id := p2p.NodeID(strings.Repeat("a1", 20)) added, err := peerManager.Add(NodeAddress{NodeID: id, Protocol: "memory"}) require.NoError(t, err) require.True(t, added) diff --git a/internal/p2p/peermanager_test.go b/internal/p2p/peermanager_test.go index 69c798d2d..15dfd71a5 100644 --- a/internal/p2p/peermanager_test.go +++ b/internal/p2p/peermanager_test.go @@ -12,7 +12,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // FIXME: We should probably have some randomized property-based tests for the @@ -22,7 +22,7 @@ import ( // tests. 
func TestPeerManagerOptions_Validate(t *testing.T) { - nodeID := types.NodeID("00112233445566778899aabbccddeeff00112233") + nodeID := p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233") testcases := map[string]struct { options p2p.PeerManagerOptions @@ -32,24 +32,24 @@ func TestPeerManagerOptions_Validate(t *testing.T) { // PersistentPeers "valid PersistentPeers NodeID": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{"00112233445566778899aabbccddeeff00112233"}, + PersistentPeers: []p2ptypes.NodeID{"00112233445566778899aabbccddeeff00112233"}, }, true}, "invalid PersistentPeers NodeID": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{"foo"}, + PersistentPeers: []p2ptypes.NodeID{"foo"}, }, false}, "uppercase PersistentPeers NodeID": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{"00112233445566778899AABBCCDDEEFF00112233"}, + PersistentPeers: []p2ptypes.NodeID{"00112233445566778899AABBCCDDEEFF00112233"}, }, false}, "PersistentPeers at MaxConnected": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID}, + PersistentPeers: []p2ptypes.NodeID{nodeID, nodeID, nodeID}, MaxConnected: 3, }, true}, "PersistentPeers above MaxConnected": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID}, + PersistentPeers: []p2ptypes.NodeID{nodeID, nodeID, nodeID}, MaxConnected: 2, }, false}, "PersistentPeers above MaxConnected below MaxConnectedUpgrade": {p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{nodeID, nodeID, nodeID}, + PersistentPeers: []p2ptypes.NodeID{nodeID, nodeID, nodeID}, MaxConnected: 2, MaxConnectedUpgrade: 2, }, false}, @@ -115,7 +115,7 @@ func TestNewPeerManager(t *testing.T) { // Invalid options should error. _, err = p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{"foo"}, + PersistentPeers: []p2ptypes.NodeID{"foo"}, }) require.Error(t, err) @@ -129,19 +129,19 @@ func TestNewPeerManager(t *testing.T) { } func TestNewPeerManager_Persistence(t *testing.T) { - aID := types.NodeID(strings.Repeat("a", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) aAddresses := []p2p.NodeAddress{ {Protocol: "tcp", NodeID: aID, Hostname: "127.0.0.1", Port: 26657, Path: "/path"}, {Protocol: "memory", NodeID: aID}, } - bID := types.NodeID(strings.Repeat("b", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) bAddresses := []p2p.NodeAddress{ {Protocol: "tcp", NodeID: bID, Hostname: "b10c::1", Port: 26657, Path: "/path"}, {Protocol: "memory", NodeID: bID}, } - cID := types.NodeID(strings.Repeat("c", 40)) + cID := p2ptypes.NodeID(strings.Repeat("c", 40)) cAddresses := []p2p.NodeAddress{ {Protocol: "tcp", NodeID: cID, Hostname: "host.domain", Port: 80}, {Protocol: "memory", NodeID: cID}, @@ -150,8 +150,8 @@ func TestNewPeerManager_Persistence(t *testing.T) { // Create an initial peer manager and add the peers. 
db := dbm.NewMemDB() peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{aID}, - PeerScores: map[types.NodeID]p2p.PeerScore{bID: 1}, + PersistentPeers: []p2ptypes.NodeID{aID}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{bID: 1}, }) require.NoError(t, err) defer peerManager.Close() @@ -165,7 +165,7 @@ func TestNewPeerManager_Persistence(t *testing.T) { require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID)) require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID)) require.ElementsMatch(t, cAddresses, peerManager.Addresses(cID)) - require.Equal(t, map[types.NodeID]p2p.PeerScore{ + require.Equal(t, map[p2ptypes.NodeID]p2p.PeerScore{ aID: p2p.PeerScorePersistent, bID: 1, cID: 0, @@ -177,8 +177,8 @@ func TestNewPeerManager_Persistence(t *testing.T) { // peers, but they should have updated scores from the new PersistentPeers // configuration. peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{bID}, - PeerScores: map[types.NodeID]p2p.PeerScore{cID: 1}, + PersistentPeers: []p2ptypes.NodeID{bID}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{cID: 1}, }) require.NoError(t, err) defer peerManager.Close() @@ -186,7 +186,7 @@ func TestNewPeerManager_Persistence(t *testing.T) { require.ElementsMatch(t, aAddresses, peerManager.Addresses(aID)) require.ElementsMatch(t, bAddresses, peerManager.Addresses(bID)) require.ElementsMatch(t, cAddresses, peerManager.Addresses(cID)) - require.Equal(t, map[types.NodeID]p2p.PeerScore{ + require.Equal(t, map[p2ptypes.NodeID]p2p.PeerScore{ aID: 0, bID: p2p.PeerScorePersistent, cID: 1, @@ -194,8 +194,8 @@ func TestNewPeerManager_Persistence(t *testing.T) { } func TestNewPeerManager_SelfIDChange(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} db := dbm.NewMemDB() peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) @@ -207,23 +207,23 @@ func TestNewPeerManager_SelfIDChange(t *testing.T) { added, err = peerManager.Add(b) require.NoError(t, err) require.True(t, added) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) + require.ElementsMatch(t, []p2ptypes.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) peerManager.Close() // If we change our selfID to one of the peers in the peer store, it // should be removed from the store. 
peerManager, err = p2p.NewPeerManager(a.NodeID, db, p2p.PeerManagerOptions{}) require.NoError(t, err) - require.Equal(t, []types.NodeID{b.NodeID}, peerManager.Peers()) + require.Equal(t, []p2ptypes.NodeID{b.NodeID}, peerManager.Peers()) } func TestPeerManager_Add(t *testing.T) { - aID := types.NodeID(strings.Repeat("a", 40)) - bID := types.NodeID(strings.Repeat("b", 40)) - cID := types.NodeID(strings.Repeat("c", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) + cID := p2ptypes.NodeID(strings.Repeat("c", 40)) peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PersistentPeers: []types.NodeID{aID, cID}, + PersistentPeers: []p2ptypes.NodeID{aID, cID}, MaxPeers: 2, MaxConnected: 2, }) @@ -261,7 +261,7 @@ func TestPeerManager_Add(t *testing.T) { Protocol: "tcp", NodeID: cID, Hostname: "localhost"}) require.NoError(t, err) require.True(t, added) - require.ElementsMatch(t, []types.NodeID{aID, cID}, peerManager.Peers()) + require.ElementsMatch(t, []p2ptypes.NodeID{aID, cID}, peerManager.Peers()) // Adding an invalid address should error. _, err = peerManager.Add(p2p.NodeAddress{Path: "foo"}) @@ -273,7 +273,7 @@ func TestPeerManager_Add(t *testing.T) { } func TestPeerManager_DialNext(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -296,7 +296,7 @@ func TestPeerManager_DialNext(t *testing.T) { } func TestPeerManager_DialNext_Retry(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} options := p2p.PeerManagerOptions{ MinRetryTime: 100 * time.Millisecond, @@ -342,7 +342,7 @@ func TestPeerManager_DialNext_Retry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnAdd(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -369,8 +369,8 @@ func TestPeerManager_DialNext_WakeOnDialFailed(t *testing.T) { }) require.NoError(t, err) - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} // Add and dial a. added, err := peerManager.Add(a) @@ -407,7 +407,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), options) require.NoError(t, err) - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} // Add a, dial it, and mark it a failure. This will start a retry timer. 
added, err := peerManager.Add(a) @@ -430,7 +430,7 @@ func TestPeerManager_DialNext_WakeOnDialFailedRetry(t *testing.T) { } func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -458,9 +458,9 @@ func TestPeerManager_DialNext_WakeOnDisconnected(t *testing.T) { } func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, @@ -494,21 +494,21 @@ func TestPeerManager_TryDialNext_MaxConnected(t *testing.T) { } func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} - d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))} - e := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("e", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} + d := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("d", 40))} + e := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("e", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ a.NodeID: 0, b.NodeID: 1, c.NodeID: 2, d.NodeID: 3, e.NodeID: 0, }, - PersistentPeers: []types.NodeID{c.NodeID, d.NodeID}, + PersistentPeers: []p2ptypes.NodeID{c.NodeID, d.NodeID}, MaxConnected: 2, MaxConnectedUpgrade: 1, }) @@ -578,12 +578,12 @@ func TestPeerManager_TryDialNext_MaxConnectedUpgrade(t *testing.T) { } func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), 
p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -617,11 +617,11 @@ func TestPeerManager_TryDialNext_UpgradeReservesPeer(t *testing.T) { } func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { - aID := types.NodeID(strings.Repeat("a", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) a := p2p.NodeAddress{Protocol: "memory", NodeID: aID} aTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: aID, Hostname: "localhost"} - bID := types.NodeID(strings.Repeat("b", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) b := p2p.NodeAddress{Protocol: "memory", NodeID: bID} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ @@ -662,8 +662,8 @@ func TestPeerManager_TryDialNext_DialingConnected(t *testing.T) { } func TestPeerManager_TryDialNext_Multiple(t *testing.T) { - aID := types.NodeID(strings.Repeat("a", 40)) - bID := types.NodeID(strings.Repeat("b", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) addresses := []p2p.NodeAddress{ {Protocol: "memory", NodeID: aID}, {Protocol: "memory", NodeID: bID}, @@ -699,9 +699,9 @@ func TestPeerManager_TryDialNext_Multiple(t *testing.T) { func TestPeerManager_DialFailed(t *testing.T) { // DialFailed is tested through other tests, we'll just check a few basic // things here, e.g. reporting unknown addresses. - aID := types.NodeID(strings.Repeat("a", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) a := p2p.NodeAddress{Protocol: "memory", NodeID: aID} - bID := types.NodeID(strings.Repeat("b", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) b := p2p.NodeAddress{Protocol: "memory", NodeID: bID} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) @@ -731,16 +731,16 @@ func TestPeerManager_DialFailed(t *testing.T) { // DialFailed on an unknown peer shouldn't error or add it. 
require.NoError(t, peerManager.DialFailed(b)) - require.Equal(t, []types.NodeID{aID}, peerManager.Peers()) + require.Equal(t, []p2ptypes.NodeID{aID}, peerManager.Peers()) } func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, MaxConnected: 1, MaxConnectedUpgrade: 2, }) @@ -781,8 +781,8 @@ func TestPeerManager_DialFailed_UnreservePeer(t *testing.T) { } func TestPeerManager_Dialed_Connected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -820,8 +820,8 @@ func TestPeerManager_Dialed_Self(t *testing.T) { } func TestPeerManager_Dialed_MaxConnected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, @@ -848,15 +848,15 @@ func TestPeerManager_Dialed_MaxConnected(t *testing.T) { } func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} - d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} + d := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("d", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{c.NodeID: 1, d.NodeID: 1}, }) require.NoError(t, err) @@ -889,7 +889,7 @@ func TestPeerManager_Dialed_MaxConnectedUpgrade(t *testing.T) { } func 
TestPeerManager_Dialed_Unknown(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -899,14 +899,14 @@ func TestPeerManager_Dialed_Unknown(t *testing.T) { } func TestPeerManager_Dialed_Upgrade(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 2, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{b.NodeID: 1, c.NodeID: 1}, }) require.NoError(t, err) @@ -941,15 +941,15 @@ func TestPeerManager_Dialed_Upgrade(t *testing.T) { } func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} - d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} + d := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("d", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ a.NodeID: 3, b.NodeID: 2, c.NodeID: 10, @@ -995,14 +995,14 @@ func TestPeerManager_Dialed_UpgradeEvenLower(t *testing.T) { } func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ a.NodeID: 1, b.NodeID: 2, c.NodeID: 3, @@ -1042,10 +1042,10 @@ func TestPeerManager_Dialed_UpgradeNoEvict(t *testing.T) { } func TestPeerManager_Accepted(t *testing.T) { - a := 
p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} - d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} + d := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("d", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1064,7 +1064,7 @@ func TestPeerManager_Accepted(t *testing.T) { // Accepting a connection from an unknown peer should work and register it. require.NoError(t, peerManager.Accepted(b.NodeID)) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) + require.ElementsMatch(t, []p2ptypes.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) // Accepting a connection from a peer that's being dialed should work, and // should cause the dial to fail. @@ -1089,9 +1089,9 @@ func TestPeerManager_Accepted(t *testing.T) { } func TestPeerManager_Accepted_MaxConnected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 2, @@ -1117,13 +1117,13 @@ func TestPeerManager_Accepted_MaxConnected(t *testing.T) { } func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} - d := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("d", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} + d := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("d", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ c.NodeID: 1, d.NodeID: 2, }, @@ -1163,12 +1163,12 @@ func TestPeerManager_Accepted_MaxConnectedUpgrade(t *testing.T) { } func TestPeerManager_Accepted_Upgrade(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: 
"memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ b.NodeID: 1, c.NodeID: 1, }, @@ -1206,12 +1206,12 @@ func TestPeerManager_Accepted_Upgrade(t *testing.T) { } func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{ + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{ b.NodeID: 1, c.NodeID: 1, }, @@ -1253,8 +1253,8 @@ func TestPeerManager_Accepted_UpgradeDialing(t *testing.T) { } func TestPeerManager_Ready(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1289,7 +1289,7 @@ func TestPeerManager_Ready(t *testing.T) { // See TryEvictNext for most tests, this just tests blocking behavior. 
func TestPeerManager_EvictNext(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1322,7 +1322,7 @@ func TestPeerManager_EvictNext(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1348,13 +1348,13 @@ func TestPeerManager_EvictNext_WakeOnError(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{b.NodeID: 1}, }) require.NoError(t, err) @@ -1386,13 +1386,13 @@ func TestPeerManager_EvictNext_WakeOnUpgradeDialed(t *testing.T) { } func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MaxConnected: 1, MaxConnectedUpgrade: 1, - PeerScores: map[types.NodeID]p2p.PeerScore{b.NodeID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{b.NodeID: 1}, }) require.NoError(t, err) @@ -1417,7 +1417,7 @@ func TestPeerManager_EvictNext_WakeOnUpgradeAccepted(t *testing.T) { require.Equal(t, a.NodeID, evict) } func TestPeerManager_TryEvictNext(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1453,7 +1453,7 @@ func TestPeerManager_TryEvictNext(t *testing.T) { } func TestPeerManager_Disconnected(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1507,7 +1507,7 @@ func TestPeerManager_Disconnected(t *testing.T) { } func TestPeerManager_Errored(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: 
p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1543,7 +1543,7 @@ func TestPeerManager_Errored(t *testing.T) { } func TestPeerManager_Subscribe(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1604,7 +1604,7 @@ func TestPeerManager_Subscribe(t *testing.T) { } func TestPeerManager_Subscribe_Close(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1631,7 +1631,7 @@ func TestPeerManager_Subscribe_Close(t *testing.T) { func TestPeerManager_Subscribe_Broadcast(t *testing.T) { t.Cleanup(leaktest.Check(t)) - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{}) require.NoError(t, err) @@ -1675,7 +1675,7 @@ func TestPeerManager_Close(t *testing.T) { // leaktest will check that spawned goroutines are closed. t.Cleanup(leaktest.CheckTimeout(t, 1*time.Second)) - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ MinRetryTime: 10 * time.Second, @@ -1701,23 +1701,23 @@ func TestPeerManager_Close(t *testing.T) { } func TestPeerManager_Advertise(t *testing.T) { - aID := types.NodeID(strings.Repeat("a", 40)) + aID := p2ptypes.NodeID(strings.Repeat("a", 40)) aTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: aID, Hostname: "127.0.0.1", Port: 26657, Path: "/path"} aMem := p2p.NodeAddress{Protocol: "memory", NodeID: aID} - bID := types.NodeID(strings.Repeat("b", 40)) + bID := p2ptypes.NodeID(strings.Repeat("b", 40)) bTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: bID, Hostname: "b10c::1", Port: 26657, Path: "/path"} bMem := p2p.NodeAddress{Protocol: "memory", NodeID: bID} - cID := types.NodeID(strings.Repeat("c", 40)) + cID := p2ptypes.NodeID(strings.Repeat("c", 40)) cTCP := p2p.NodeAddress{Protocol: "tcp", NodeID: cID, Hostname: "host.domain", Port: 80} cMem := p2p.NodeAddress{Protocol: "memory", NodeID: cID} - dID := types.NodeID(strings.Repeat("d", 40)) + dID := p2ptypes.NodeID(strings.Repeat("d", 40)) // Create an initial peer manager and add the peers. 
peerManager, err := p2p.NewPeerManager(selfID, dbm.NewMemDB(), p2p.PeerManagerOptions{ - PeerScores: map[types.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1}, + PeerScores: map[p2ptypes.NodeID]p2p.PeerScore{aID: 3, bID: 2, cID: 1}, }) require.NoError(t, err) defer peerManager.Close() @@ -1761,8 +1761,8 @@ func TestPeerManager_Advertise(t *testing.T) { } func TestPeerManager_SetHeight_GetHeight(t *testing.T) { - a := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "memory", NodeID: types.NodeID(strings.Repeat("b", 40))} + a := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "memory", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} db := dbm.NewMemDB() peerManager, err := p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) @@ -1781,17 +1781,17 @@ func TestPeerManager_SetHeight_GetHeight(t *testing.T) { require.EqualValues(t, 3, peerManager.GetHeight(a.NodeID)) // Setting a height should add an unknown node. - require.Equal(t, []types.NodeID{a.NodeID}, peerManager.Peers()) + require.Equal(t, []p2ptypes.NodeID{a.NodeID}, peerManager.Peers()) require.NoError(t, peerManager.SetHeight(b.NodeID, 7)) require.EqualValues(t, 7, peerManager.GetHeight(b.NodeID)) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) + require.ElementsMatch(t, []p2ptypes.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) // The heights should not be persisted. peerManager.Close() peerManager, err = p2p.NewPeerManager(selfID, db, p2p.PeerManagerOptions{}) require.NoError(t, err) - require.ElementsMatch(t, []types.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) + require.ElementsMatch(t, []p2ptypes.NodeID{a.NodeID, b.NodeID}, peerManager.Peers()) require.Zero(t, peerManager.GetHeight(a.NodeID)) require.Zero(t, peerManager.GetHeight(b.NodeID)) } diff --git a/internal/p2p/pex/addrbook.go b/internal/p2p/pex/addrbook.go index 6c5f78663..f6814f992 100644 --- a/internal/p2p/pex/addrbook.go +++ b/internal/p2p/pex/addrbook.go @@ -21,7 +21,7 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) const ( @@ -60,7 +60,7 @@ type AddrBook interface { PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress // Mark address - MarkGood(types.NodeID) + MarkGood(p2ptypes.NodeID) MarkAttempt(*p2p.NetAddress) MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list // Add bad peers back to addrBook @@ -90,9 +90,9 @@ type addrBook struct { // accessed concurrently mtx tmsync.Mutex ourAddrs map[string]struct{} - privateIDs map[types.NodeID]struct{} - addrLookup map[types.NodeID]*knownAddress // new & old - badPeers map[types.NodeID]*knownAddress // blacklisted peers + privateIDs map[p2ptypes.NodeID]struct{} + addrLookup map[p2ptypes.NodeID]*knownAddress // new & old + badPeers map[p2ptypes.NodeID]*knownAddress // blacklisted peers bucketsOld []map[string]*knownAddress bucketsNew []map[string]*knownAddress nOld int @@ -121,9 +121,9 @@ func mustNewHasher() hash.Hash64 { func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { am := &addrBook{ ourAddrs: make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), + privateIDs: 
make(map[p2ptypes.NodeID]struct{}), + addrLookup: make(map[p2ptypes.NodeID]*knownAddress), + badPeers: make(map[p2ptypes.NodeID]*knownAddress), filePath: filePath, routabilityStrict: routabilityStrict, } @@ -202,7 +202,7 @@ func (a *addrBook) AddPrivateIDs(ids []string) { defer a.mtx.Unlock() for _, id := range ids { - a.privateIDs[types.NodeID(id)] = struct{}{} + a.privateIDs[p2ptypes.NodeID(id)] = struct{}{} } } @@ -320,7 +320,7 @@ func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress { // MarkGood implements AddrBook - it marks the peer as good and // moves it into an "old" bucket. -func (a *addrBook) MarkGood(id types.NodeID) { +func (a *addrBook) MarkGood(id p2ptypes.NodeID) { a.mtx.Lock() defer a.mtx.Unlock() diff --git a/internal/p2p/pex/addrbook_test.go b/internal/p2p/pex/addrbook_test.go index 3d21314ad..1e3836ba6 100644 --- a/internal/p2p/pex/addrbook_test.go +++ b/internal/p2p/pex/addrbook_test.go @@ -18,7 +18,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // FIXME These tests should not rely on .(*addrBook) assertions @@ -195,9 +195,9 @@ func randIPv4Address(t *testing.T) *p2p.NetAddress { mrand.Intn(255), ) port := mrand.Intn(65535-1) + 1 - id := types.NodeID(hex.EncodeToString(tmrand.Bytes(types.NodeIDByteLength))) + id := p2ptypes.NodeID(hex.EncodeToString(tmrand.Bytes(p2ptypes.NodeIDByteLength))) idAddr := id.AddressString(fmt.Sprintf("%v:%v", ip, port)) - addr, err := types.NewNetAddressString(idAddr) + addr, err := p2ptypes.NewNetAddressString(idAddr) assert.Nil(t, err, "error generating rand network address") if addr.Routable() { return addr @@ -580,13 +580,13 @@ func TestAddrBookAddDoesNotOverwriteOldIP(t *testing.T) { // to ensure we aren't in a case that got probabilistically ignored numOverrideAttempts := 10 - peerRealAddr, err := types.NewNetAddressString(peerID + "@" + peerRealIP) + peerRealAddr, err := p2ptypes.NewNetAddressString(peerID + "@" + peerRealIP) require.Nil(t, err) - peerOverrideAttemptAddr, err := types.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) + peerOverrideAttemptAddr, err := p2ptypes.NewNetAddressString(peerID + "@" + peerOverrideAttemptIP) require.Nil(t, err) - src, err := types.NewNetAddressString(SrcAddr) + src, err := p2ptypes.NewNetAddressString(SrcAddr) require.Nil(t, err) book := NewAddrBook(fname, true) @@ -650,7 +650,7 @@ func TestAddrBookGroupKey(t *testing.T) { for i, tc := range testCases { nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), false) + key := groupKeyFor(p2ptypes.NewNetAddressIPPort(nip, 26656), false) assert.Equal(t, tc.expKey, key, "#%d", i) } @@ -680,7 +680,7 @@ func TestAddrBookGroupKey(t *testing.T) { for i, tc := range testCases { nip := net.ParseIP(tc.ip) - key := groupKeyFor(types.NewNetAddressIPPort(nip, 26656), true) + key := groupKeyFor(p2ptypes.NewNetAddressIPPort(nip, 26656), true) assert.Equal(t, tc.expKey, key, "#%d", i) } } diff --git a/internal/p2p/pex/bench_test.go b/internal/p2p/pex/bench_test.go index 37019f60a..b441ef45f 100644 --- a/internal/p2p/pex/bench_test.go +++ b/internal/p2p/pex/bench_test.go @@ -3,15 +3,15 @@ package pex import ( "testing" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func BenchmarkAddrBook_hash(b *testing.B) { book := &addrBook{ ourAddrs: 
make(map[string]struct{}), - privateIDs: make(map[types.NodeID]struct{}), - addrLookup: make(map[types.NodeID]*knownAddress), - badPeers: make(map[types.NodeID]*knownAddress), + privateIDs: make(map[p2p.NodeID]struct{}), + addrLookup: make(map[p2p.NodeID]*knownAddress), + badPeers: make(map[p2p.NodeID]*knownAddress), filePath: "", routabilityStrict: true, } diff --git a/internal/p2p/pex/known_address.go b/internal/p2p/pex/known_address.go index 2a2ebe038..65194c67d 100644 --- a/internal/p2p/pex/known_address.go +++ b/internal/p2p/pex/known_address.go @@ -4,7 +4,7 @@ import ( "time" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // knownAddress tracks information about a known network address @@ -31,7 +31,7 @@ func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { } } -func (ka *knownAddress) ID() types.NodeID { +func (ka *knownAddress) ID() p2ptypes.NodeID { return ka.Addr.ID } diff --git a/internal/p2p/pex/pex_reactor.go b/internal/p2p/pex/pex_reactor.go index 049dbd9f1..fcae62296 100644 --- a/internal/p2p/pex/pex_reactor.go +++ b/internal/p2p/pex/pex_reactor.go @@ -15,8 +15,8 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) type Peer = p2p.Peer @@ -99,7 +99,7 @@ type Reactor struct { attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)} // seed/crawled mode fields - crawlPeerInfos map[types.NodeID]crawlPeerInfo + crawlPeerInfos map[p2ptypes.NodeID]crawlPeerInfo } func (r *Reactor) minReceiveRequestInterval() time.Duration { @@ -139,7 +139,7 @@ func NewReactor(b AddrBook, config *ReactorConfig) *Reactor { ensurePeersPeriod: defaultEnsurePeersPeriod, requestsSent: cmap.NewCMap(), lastReceivedRequests: cmap.NewCMap(), - crawlPeerInfos: make(map[types.NodeID]crawlPeerInfo), + crawlPeerInfos: make(map[p2ptypes.NodeID]crawlPeerInfo), } r.BaseReactor = *p2p.NewBaseReactor("PEX", r) return r @@ -479,7 +479,7 @@ func (r *Reactor) ensurePeers() { // NOTE: range here is [10, 90]. Too high ? newBias := tmmath.MinInt(out, 8)*10 + 10 - toDial := make(map[types.NodeID]*p2p.NetAddress) + toDial := make(map[p2ptypes.NodeID]*p2p.NetAddress) // Try maxAttempts times to pick numToDial addresses to dial maxAttempts := numToDial * 3 @@ -617,7 +617,7 @@ func (r *Reactor) checkSeeds() (numOnline int, netAddrs []*p2p.NetAddress, err e numOnline = lSeeds - len(errs) for _, err := range errs { switch e := err.(type) { - case types.ErrNetAddressLookup: + case p2ptypes.ErrNetAddressLookup: r.Logger.Error("Connecting to seed failed", "err", e) default: return 0, nil, fmt.Errorf("seed node configuration has error: %w", e) @@ -819,7 +819,7 @@ func decodeMsg(bz []byte) (proto.Message, error) { // address converters // NetAddressFromProto converts a Protobuf PexAddress into a native struct. 
-func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { +func NetAddressFromProto(pb tmp2p.PexAddress) (*p2ptypes.NetAddress, error) { ip := net.ParseIP(pb.IP) if ip == nil { return nil, fmt.Errorf("invalid IP address %v", pb.IP) @@ -827,16 +827,16 @@ func NetAddressFromProto(pb tmp2p.PexAddress) (*types.NetAddress, error) { if pb.Port >= 1<<16 { return nil, fmt.Errorf("invalid port number %v", pb.Port) } - return &types.NetAddress{ - ID: types.NodeID(pb.ID), + return &p2ptypes.NetAddress{ + ID: p2ptypes.NodeID(pb.ID), IP: ip, Port: uint16(pb.Port), }, nil } // NetAddressesFromProto converts a slice of Protobuf PexAddresses into a native slice. -func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) { - nas := make([]*types.NetAddress, 0, len(pbs)) +func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*p2ptypes.NetAddress, error) { + nas := make([]*p2ptypes.NetAddress, 0, len(pbs)) for _, pb := range pbs { na, err := NetAddressFromProto(pb) if err != nil { @@ -848,7 +848,7 @@ func NetAddressesFromProto(pbs []tmp2p.PexAddress) ([]*types.NetAddress, error) } // NetAddressesToProto converts a slice of NetAddresses into a Protobuf PexAddress slice. -func NetAddressesToProto(nas []*types.NetAddress) []tmp2p.PexAddress { +func NetAddressesToProto(nas []*p2ptypes.NetAddress) []tmp2p.PexAddress { pbs := make([]tmp2p.PexAddress, 0, len(nas)) for _, na := range nas { if na != nil { diff --git a/internal/p2p/pex/pex_reactor_test.go b/internal/p2p/pex/pex_reactor_test.go index 56f24457f..5ffaae919 100644 --- a/internal/p2p/pex/pex_reactor_test.go +++ b/internal/p2p/pex/pex_reactor_test.go @@ -17,8 +17,8 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/mock" "github.com/tendermint/tendermint/libs/log" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) var ( @@ -129,7 +129,7 @@ func TestPEXReactorReceive(t *testing.T) { size := book.Size() na, err := peer.NodeInfo().NetAddress() require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*p2ptypes.NetAddress{na})}) r.Receive(PexChannel, peer, msg) assert.Equal(t, size+1, book.Size()) @@ -186,7 +186,7 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.True(t, r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{peer.SocketAddr()})}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*p2ptypes.NetAddress{peer.SocketAddr()})}) // receive some addrs. 
should clear the request r.Receive(PexChannel, peer, msg) @@ -459,7 +459,7 @@ func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { na, err := peer.NodeInfo().NetAddress() require.NoError(t, err) - msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*types.NetAddress{na})}) + msg := mustEncode(&tmp2p.PexResponse{Addresses: NetAddressesToProto([]*p2ptypes.NetAddress{na})}) pexR.Receive(PexChannel, peer, msg) assert.Equal(t, size, book.Size()) diff --git a/internal/p2p/pex/reactor.go b/internal/p2p/pex/reactor.go index 8cff2f95b..05e935110 100644 --- a/internal/p2p/pex/reactor.go +++ b/internal/p2p/pex/reactor.go @@ -12,8 +12,8 @@ import ( "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) var ( @@ -79,7 +79,7 @@ type ReactorV2 struct { closeCh chan struct{} // list of available peers to loop through and send peer requests to - availablePeers map[types.NodeID]struct{} + availablePeers map[p2ptypes.NodeID]struct{} mtx sync.RWMutex @@ -87,12 +87,12 @@ type ReactorV2 struct { // to. This prevents the sending of spurious responses. // NOTE: If a node never responds, they will remain in this map until a // peer down status update is sent - requestsSent map[types.NodeID]struct{} + requestsSent map[p2ptypes.NodeID]struct{} // lastReceivedRequests keeps track of when peers send a request to prevent // peers from sending requests too often (as defined by // minReceiveRequestInterval). - lastReceivedRequests map[types.NodeID]time.Time + lastReceivedRequests map[p2ptypes.NodeID]time.Time // the time when another request will be sent nextRequestTime time.Time @@ -121,9 +121,9 @@ func NewReactorV2( pexCh: pexCh, peerUpdates: peerUpdates, closeCh: make(chan struct{}), - availablePeers: make(map[types.NodeID]struct{}), - requestsSent: make(map[types.NodeID]struct{}), - lastReceivedRequests: make(map[types.NodeID]time.Time), + availablePeers: make(map[p2ptypes.NodeID]struct{}), + requestsSent: make(map[p2ptypes.NodeID]struct{}), + lastReceivedRequests: make(map[p2ptypes.NodeID]time.Time), } r.BaseService = *service.NewBaseService(logger, "PEX", r) @@ -426,7 +426,7 @@ func (r *ReactorV2) sendRequestForPeers() { return } - var peerID types.NodeID + var peerID p2ptypes.NodeID // use range to get a random peer. 
for peerID = range r.availablePeers { @@ -500,7 +500,7 @@ func (r *ReactorV2) calculateNextRequestTime() { r.nextRequestTime = time.Now().Add(baseTime * time.Duration(r.discoveryRatio)) } -func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { +func (r *ReactorV2) markPeerRequest(peer p2ptypes.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() if lastRequestTime, ok := r.lastReceivedRequests[peer]; ok { @@ -513,7 +513,7 @@ func (r *ReactorV2) markPeerRequest(peer types.NodeID) error { return nil } -func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { +func (r *ReactorV2) markPeerResponse(peer p2ptypes.NodeID) error { r.mtx.Lock() defer r.mtx.Unlock() // check if a request to this peer was sent @@ -530,7 +530,7 @@ func (r *ReactorV2) markPeerResponse(peer types.NodeID) error { // all addresses must use a MCONN protocol for the peer to be considered part of the // legacy p2p pex system -func (r *ReactorV2) isLegacyPeer(peer types.NodeID) bool { +func (r *ReactorV2) isLegacyPeer(peer p2ptypes.NodeID) bool { for _, addr := range r.peerManager.Addresses(peer) { if addr.Protocol != p2p.MConnProtocol { return false diff --git a/internal/p2p/pex/reactor_test.go b/internal/p2p/pex/reactor_test.go index a5acb0d5e..7a7ad74ac 100644 --- a/internal/p2p/pex/reactor_test.go +++ b/internal/p2p/pex/reactor_test.go @@ -14,8 +14,8 @@ import ( "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/libs/log" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" proto "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) const ( @@ -327,14 +327,14 @@ type reactorTestSuite struct { network *p2ptest.Network logger log.Logger - reactors map[types.NodeID]*pex.ReactorV2 - pexChannels map[types.NodeID]*p2p.Channel + reactors map[p2ptypes.NodeID]*pex.ReactorV2 + pexChannels map[p2ptypes.NodeID]*p2p.Channel - peerChans map[types.NodeID]chan p2p.PeerUpdate - peerUpdates map[types.NodeID]*p2p.PeerUpdates + peerChans map[p2ptypes.NodeID]chan p2p.PeerUpdate + peerUpdates map[p2ptypes.NodeID]*p2p.PeerUpdates - nodes []types.NodeID - mocks []types.NodeID + nodes []p2ptypes.NodeID + mocks []p2ptypes.NodeID total int opts testOptions } @@ -370,10 +370,10 @@ func setupNetwork(t *testing.T, opts testOptions) *reactorTestSuite { rts := &reactorTestSuite{ logger: log.TestingLogger().With("testCase", t.Name()), network: p2ptest.MakeNetwork(t, networkOpts), - reactors: make(map[types.NodeID]*pex.ReactorV2, realNodes), - pexChannels: make(map[types.NodeID]*p2p.Channel, opts.TotalNodes), - peerChans: make(map[types.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), - peerUpdates: make(map[types.NodeID]*p2p.PeerUpdates, opts.TotalNodes), + reactors: make(map[p2ptypes.NodeID]*pex.ReactorV2, realNodes), + pexChannels: make(map[p2ptypes.NodeID]*p2p.Channel, opts.TotalNodes), + peerChans: make(map[p2ptypes.NodeID]chan p2p.PeerUpdate, opts.TotalNodes), + peerUpdates: make(map[p2ptypes.NodeID]*p2p.PeerUpdates, opts.TotalNodes), total: opts.TotalNodes, opts: opts, } @@ -465,7 +465,7 @@ func (r *reactorTestSuite) addNodes(t *testing.T, nodes int) { func (r *reactorTestSuite) listenFor( t *testing.T, - node types.NodeID, + node p2ptypes.NodeID, conditional func(msg p2p.Envelope) bool, assertion func(t *testing.T, msg p2p.Envelope) bool, waitPeriod time.Duration, @@ -789,7 +789,7 @@ func (r *reactorTestSuite) pexAddresses(t *testing.T, nodeIndices []int) []proto return addresses } -func (r 
*reactorTestSuite) checkNodePair(t *testing.T, first, second int) (types.NodeID, types.NodeID) { +func (r *reactorTestSuite) checkNodePair(t *testing.T, first, second int) (p2ptypes.NodeID, p2ptypes.NodeID) { require.NotEqual(t, first, second) require.Less(t, first, r.total) require.Less(t, second, r.total) @@ -807,12 +807,12 @@ func (r *reactorTestSuite) addAddresses(t *testing.T, node int, addrs []int) { } } -func newNodeID(t *testing.T, id string) types.NodeID { - nodeID, err := types.NewNodeID(strings.Repeat(id, 2*types.NodeIDByteLength)) +func newNodeID(t *testing.T, id string) p2ptypes.NodeID { + nodeID, err := p2ptypes.NewNodeID(strings.Repeat(id, 2*p2ptypes.NodeIDByteLength)) require.NoError(t, err) return nodeID } -func randomNodeID(t *testing.T) types.NodeID { - return types.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) +func randomNodeID(t *testing.T) p2ptypes.NodeID { + return p2ptypes.NodeIDFromPubKey(ed25519.GenPrivKey().PubKey()) } diff --git a/internal/p2p/router.go b/internal/p2p/router.go index 1171566d1..4e5403687 100644 --- a/internal/p2p/router.go +++ b/internal/p2p/router.go @@ -16,7 +16,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) const queueBufferDefault = 32 @@ -26,8 +26,8 @@ type ChannelID uint16 // Envelope contains a message with sender/receiver routing info. type Envelope struct { - From types.NodeID // sender (empty if outbound) - To types.NodeID // receiver (empty if inbound) + From p2p.NodeID // sender (empty if outbound) + To p2p.NodeID // receiver (empty if inbound) Broadcast bool // send to all connected peers (ignores To) Message proto.Message // message payload @@ -52,7 +52,7 @@ type Envelope struct { // It should possibly also allow reactors to request explicit actions, e.g. // disconnection or banning, in addition to doing this based on aggregates. type PeerError struct { - NodeID types.NodeID + NodeID p2p.NodeID Err error } @@ -157,7 +157,7 @@ type RouterOptions struct { // but this occurs after the handshake is complete. Filter by // IP address to filter before the handshake. Functions should // return an error to reject the peer. - FilterPeerByID func(context.Context, types.NodeID) error + FilterPeerByID func(context.Context, p2p.NodeID) error // DialSleep controls the amount of time that the router // sleeps between dialing peers. 
If not set, a default value @@ -248,7 +248,7 @@ type Router struct { logger log.Logger metrics *Metrics options RouterOptions - nodeInfo types.NodeInfo + nodeInfo p2p.NodeInfo privKey crypto.PrivKey peerManager *PeerManager chDescs []ChannelDescriptor @@ -258,9 +258,9 @@ type Router struct { stopCh chan struct{} // signals Router shutdown peerMtx sync.RWMutex - peerQueues map[types.NodeID]queue // outbound messages per peer for all channels + peerQueues map[p2p.NodeID]queue // outbound messages per peer for all channels // the channels that the peer queue has open - peerChannels map[types.NodeID]channelIDs + peerChannels map[p2p.NodeID]channelIDs queueFactory func(int) queue // FIXME: We don't strictly need to use a mutex for this if we seal the @@ -277,7 +277,7 @@ type Router struct { func NewRouter( logger log.Logger, metrics *Metrics, - nodeInfo types.NodeInfo, + nodeInfo p2p.NodeInfo, privKey crypto.PrivKey, peerManager *PeerManager, transports []Transport, @@ -305,8 +305,8 @@ func NewRouter( stopCh: make(chan struct{}), channelQueues: map[ChannelID]queue{}, channelMessages: map[ChannelID]proto.Message{}, - peerQueues: map[types.NodeID]queue{}, - peerChannels: make(map[types.NodeID]channelIDs), + peerQueues: map[p2p.NodeID]queue{}, + peerChannels: make(map[p2p.NodeID]channelIDs), } router.BaseService = service.NewBaseService(logger, "router", router) @@ -533,7 +533,7 @@ func (r *Router) filterPeersIP(ctx context.Context, ip net.IP, port uint16) erro return r.options.FilterPeerByIP(ctx, ip, port) } -func (r *Router) filterPeersID(ctx context.Context, id types.NodeID) error { +func (r *Router) filterPeersID(ctx context.Context, id p2p.NodeID) error { if r.options.FilterPeerByID == nil { return nil } @@ -739,7 +739,7 @@ func (r *Router) connectPeer(ctx context.Context, address NodeAddress) { go r.routePeer(address.NodeID, conn, toChannelIDs(peerInfo.Channels)) } -func (r *Router) getOrMakeQueue(peerID types.NodeID, channels channelIDs) queue { +func (r *Router) getOrMakeQueue(peerID p2p.NodeID, channels channelIDs) queue { r.peerMtx.Lock() defer r.peerMtx.Unlock() @@ -808,8 +808,8 @@ func (r *Router) dialPeer(ctx context.Context, address NodeAddress) (Connection, func (r *Router) handshakePeer( ctx context.Context, conn Connection, - expectID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { + expectID p2p.NodeID, +) (p2p.NodeInfo, crypto.PubKey, error) { if r.options.HandshakeTimeout > 0 { var cancel context.CancelFunc @@ -824,9 +824,9 @@ func (r *Router) handshakePeer( if err = peerInfo.Validate(); err != nil { return peerInfo, peerKey, fmt.Errorf("invalid handshake NodeInfo: %w", err) } - if types.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { + if p2p.NodeIDFromPubKey(peerKey) != peerInfo.NodeID { return peerInfo, peerKey, fmt.Errorf("peer's public key did not match its node ID %q (expected %q)", - peerInfo.NodeID, types.NodeIDFromPubKey(peerKey)) + peerInfo.NodeID, p2p.NodeIDFromPubKey(peerKey)) } if expectID != "" && expectID != peerInfo.NodeID { return peerInfo, peerKey, fmt.Errorf("expected to connect with peer %q, got %q", @@ -851,7 +851,7 @@ func (r *Router) runWithPeerMutex(fn func() error) error { // routePeer routes inbound and outbound messages between a peer and the reactor // channels. It will close the given connection and send queue when done, or if // they are closed elsewhere it will cause this method to shut down and return. 
-func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channelIDs) { +func (r *Router) routePeer(peerID p2p.NodeID, conn Connection, channels channelIDs) { r.metrics.Peers.Add(1) r.peerManager.Ready(peerID) @@ -901,7 +901,7 @@ func (r *Router) routePeer(peerID types.NodeID, conn Connection, channels channe // receivePeer receives inbound messages from a peer, deserializes them and // passes them on to the appropriate channel. -func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { +func (r *Router) receivePeer(peerID p2p.NodeID, conn Connection) error { for { chID, bz, err := conn.ReceiveMessage() if err != nil { @@ -952,7 +952,7 @@ func (r *Router) receivePeer(peerID types.NodeID, conn Connection) error { } // sendPeer sends queued messages to a peer. -func (r *Router) sendPeer(peerID types.NodeID, conn Connection, peerQueue queue) error { +func (r *Router) sendPeer(peerID p2p.NodeID, conn Connection, peerQueue queue) error { for { start := time.Now().UTC() @@ -1017,7 +1017,7 @@ func (r *Router) evictPeers() { } // NodeInfo returns a copy of the current NodeInfo. Used for testing. -func (r *Router) NodeInfo() types.NodeInfo { +func (r *Router) NodeInfo() p2p.NodeInfo { return r.nodeInfo.Copy() } diff --git a/internal/p2p/router_init_test.go b/internal/p2p/router_init_test.go index 3622c0cc1..9c0274086 100644 --- a/internal/p2p/router_init_test.go +++ b/internal/p2p/router_init_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func TestRouter_ConstructQueueFactory(t *testing.T) { @@ -18,21 +18,21 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { t.Run("Default", func(t *testing.T) { require.Zero(t, os.Getenv("TM_P2P_QUEUE")) opts := RouterOptions{} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, p2p.NodeInfo{}, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Fifo", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypeFifo} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, p2p.NodeInfo{}, nil, nil, nil, opts) require.NoError(t, err) _, ok := r.queueFactory(1).(*fifoQueue) require.True(t, ok) }) t.Run("Priority", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypePriority} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, p2p.NodeInfo{}, nil, nil, nil, opts) require.NoError(t, err) q, ok := r.queueFactory(1).(*pqScheduler) require.True(t, ok) @@ -40,7 +40,7 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { }) t.Run("WDRR", func(t *testing.T) { opts := RouterOptions{QueueType: queueTypeWDRR} - r, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + r, err := NewRouter(log.NewNopLogger(), nil, p2p.NodeInfo{}, nil, nil, nil, opts) require.NoError(t, err) q, ok := r.queueFactory(1).(*wdrrScheduler) require.True(t, ok) @@ -48,7 +48,7 @@ func TestRouter_ConstructQueueFactory(t *testing.T) { }) t.Run("NonExistant", func(t *testing.T) { opts := RouterOptions{QueueType: "fast"} - _, err := NewRouter(log.NewNopLogger(), nil, types.NodeInfo{}, nil, nil, nil, opts) + _, err := NewRouter(log.NewNopLogger(), nil, p2p.NodeInfo{}, nil, 
nil, nil, opts) require.Error(t, err) require.Contains(t, err.Error(), "fast") }) diff --git a/internal/p2p/router_test.go b/internal/p2p/router_test.go index 436e3f004..16a78ca0f 100644 --- a/internal/p2p/router_test.go +++ b/internal/p2p/router_test.go @@ -24,7 +24,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p/mocks" "github.com/tendermint/tendermint/internal/p2p/p2ptest" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) func echoReactor(channel *p2p.Channel) { @@ -142,7 +142,7 @@ func TestRouter_Channel_Basic(t *testing.T) { // We should be able to send on the channel, even though there are no peers. p2ptest.RequireSend(t, channel, p2p.Envelope{ - To: types.NodeID(strings.Repeat("a", 40)), + To: p2ptypes.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "foo"}, }) @@ -185,7 +185,7 @@ func TestRouter_Channel_SendReceive(t *testing.T) { // Sending to an unknown peer should be dropped. p2ptest.RequireSend(t, a, p2p.Envelope{ - To: types.NodeID(strings.Repeat("a", 40)), + To: p2ptypes.NodeID(strings.Repeat("a", 40)), Message: &p2ptest.Message{Value: "a"}, }) p2ptest.RequireEmpty(t, a, b, c) @@ -324,16 +324,16 @@ func TestRouter_Channel_Error(t *testing.T) { func TestRouter_AcceptPeers(t *testing.T) { testcases := map[string]struct { - peerInfo types.NodeInfo + peerInfo p2ptypes.NodeInfo peerKey crypto.PubKey ok bool }{ "valid handshake": {peerInfo, peerKey.PubKey(), true}, - "empty handshake": {types.NodeInfo{}, nil, false}, + "empty handshake": {p2ptypes.NodeInfo{}, nil, false}, "invalid key": {peerInfo, selfKey.PubKey(), false}, "self handshake": {selfInfo, selfKey.PubKey(), false}, "incompatible peer": { - types.NodeInfo{ + p2ptypes.NodeInfo{ NodeID: peerID, ListenAddr: "0.0.0.0:0", Network: "other-network", @@ -493,7 +493,7 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). 
- WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) + WaitUntil(closeCh).Return(p2ptypes.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) mockConnection.On("RemoteEndpoint").Return(p2p.Endpoint{}) @@ -536,20 +536,20 @@ func TestRouter_AcceptPeers_HeadOfLineBlocking(t *testing.T) { func TestRouter_DialPeers(t *testing.T) { testcases := map[string]struct { - dialID types.NodeID - peerInfo types.NodeInfo + dialID p2ptypes.NodeID + peerInfo p2ptypes.NodeInfo peerKey crypto.PubKey dialErr error ok bool }{ "valid dial": {peerInfo.NodeID, peerInfo, peerKey.PubKey(), nil, true}, - "empty handshake": {peerInfo.NodeID, types.NodeInfo{}, nil, nil, false}, + "empty handshake": {peerInfo.NodeID, p2ptypes.NodeInfo{}, nil, nil, false}, "invalid key": {peerInfo.NodeID, peerInfo, selfKey.PubKey(), nil, false}, "unexpected node ID": {peerInfo.NodeID, selfInfo, selfKey.PubKey(), nil, false}, "dial error": {peerInfo.NodeID, peerInfo, peerKey.PubKey(), errors.New("boom"), false}, "incompatible peer": { peerInfo.NodeID, - types.NodeInfo{ + p2ptypes.NodeInfo{ NodeID: peerID, ListenAddr: "0.0.0.0:0", Network: "other-network", @@ -649,9 +649,9 @@ func TestRouter_DialPeers(t *testing.T) { func TestRouter_DialPeers_Parallel(t *testing.T) { t.Cleanup(leaktest.Check(t)) - a := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("a", 40))} - b := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("b", 40))} - c := p2p.NodeAddress{Protocol: "mock", NodeID: types.NodeID(strings.Repeat("c", 40))} + a := p2p.NodeAddress{Protocol: "mock", NodeID: p2ptypes.NodeID(strings.Repeat("a", 40))} + b := p2p.NodeAddress{Protocol: "mock", NodeID: p2ptypes.NodeID(strings.Repeat("b", 40))} + c := p2p.NodeAddress{Protocol: "mock", NodeID: p2ptypes.NodeID(strings.Repeat("c", 40))} // Set up a mock transport that returns a connection that blocks during the // handshake. It should dial all peers in parallel. @@ -661,7 +661,7 @@ func TestRouter_DialPeers_Parallel(t *testing.T) { mockConnection := &mocks.Connection{} mockConnection.On("String").Maybe().Return("mock") mockConnection.On("Handshake", mock.Anything, selfInfo, selfKey). 
- WaitUntil(closeCh).Return(types.NodeInfo{}, nil, io.EOF) + WaitUntil(closeCh).Return(p2ptypes.NodeInfo{}, nil, io.EOF) mockConnection.On("Close").Return(nil) mockTransport := &mocks.Transport{} @@ -799,7 +799,7 @@ func TestRouter_EvictPeers(t *testing.T) { func TestRouter_ChannelCompatability(t *testing.T) { t.Cleanup(leaktest.Check(t)) - incompatiblePeer := types.NodeInfo{ + incompatiblePeer := p2ptypes.NodeInfo{ NodeID: peerID, ListenAddr: "0.0.0.0:0", Network: "test", @@ -848,7 +848,7 @@ func TestRouter_ChannelCompatability(t *testing.T) { func TestRouter_DontSendOnInvalidChannel(t *testing.T) { t.Cleanup(leaktest.Check(t)) - peer := types.NodeInfo{ + peer := p2ptypes.NodeInfo{ NodeID: peerID, ListenAddr: "0.0.0.0:0", Network: "test", diff --git a/internal/p2p/shim_test.go b/internal/p2p/shim_test.go index d8b9e30c3..7c0fc4866 100644 --- a/internal/p2p/shim_test.go +++ b/internal/p2p/shim_test.go @@ -11,8 +11,8 @@ import ( "github.com/tendermint/tendermint/internal/p2p" p2pmocks "github.com/tendermint/tendermint/internal/p2p/mocks" "github.com/tendermint/tendermint/libs/log" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" ) var ( @@ -78,10 +78,10 @@ func setup(t *testing.T, peers []p2p.Peer) *reactorShimTestSuite { return rts } -func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, types.NodeID) { +func simplePeer(t *testing.T, id string) (*p2pmocks.Peer, p2ptypes.NodeID) { t.Helper() - peerID := types.NodeID(id) + peerID := p2ptypes.NodeID(id) peer := &p2pmocks.Peer{} peer.On("ID").Return(peerID) diff --git a/internal/p2p/switch.go b/internal/p2p/switch.go index eeb93a994..7bad0f1fc 100644 --- a/internal/p2p/switch.go +++ b/internal/p2p/switch.go @@ -16,7 +16,7 @@ import ( "github.com/tendermint/tendermint/libs/cmap" tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) const ( @@ -57,7 +57,7 @@ type AddrBook interface { AddPrivateIDs([]string) AddOurAddress(*NetAddress) OurAddress(*NetAddress) bool - MarkGood(types.NodeID) + MarkGood(p2p.NodeID) RemoveAddress(*NetAddress) HasAddress(*NetAddress) bool Save() @@ -103,12 +103,12 @@ type Switch struct { peers *PeerSet dialing *cmap.CMap reconnecting *cmap.CMap - nodeInfo types.NodeInfo // our node info - nodeKey types.NodeKey // our node privkey + nodeInfo p2p.NodeInfo // our node info + nodeKey p2p.NodeKey // our node privkey addrBook AddrBook // peers addresses with whom we'll maintain constant connection persistentPeersAddrs []*NetAddress - unconditionalPeerIDs map[types.NodeID]struct{} + unconditionalPeerIDs map[p2p.NodeID]struct{} transport Transport @@ -154,7 +154,7 @@ func NewSwitch( metrics: NopMetrics(), transport: transport, persistentPeersAddrs: make([]*NetAddress, 0), - unconditionalPeerIDs: make(map[types.NodeID]struct{}), + unconditionalPeerIDs: make(map[p2p.NodeID]struct{}), filterTimeout: defaultFilterTimeout, conns: NewConnSet(), } @@ -242,19 +242,19 @@ func (sw *Switch) Reactor(name string) Reactor { // SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. // NOTE: Not goroutine safe. -func (sw *Switch) SetNodeInfo(nodeInfo types.NodeInfo) { +func (sw *Switch) SetNodeInfo(nodeInfo p2p.NodeInfo) { sw.nodeInfo = nodeInfo } // NodeInfo returns the switch's NodeInfo. // NOTE: Not goroutine safe. 
-func (sw *Switch) NodeInfo() types.NodeInfo { +func (sw *Switch) NodeInfo() p2p.NodeInfo { return sw.nodeInfo } // SetNodeKey sets the switch's private key for authenticated encryption. // NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey types.NodeKey) { +func (sw *Switch) SetNodeKey(nodeKey p2p.NodeKey) { sw.nodeKey = nodeKey } @@ -353,7 +353,7 @@ func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { return } -func (sw *Switch) IsPeerUnconditional(id types.NodeID) bool { +func (sw *Switch) IsPeerUnconditional(id p2p.NodeID) bool { _, ok := sw.unconditionalPeerIDs[id] return ok } @@ -518,7 +518,7 @@ func (sw *Switch) DialPeersAsync(peers []string) error { } // return first non-ErrNetAddressLookup error for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { + if _, ok := err.(p2p.ErrNetAddressLookup); ok { continue } return err @@ -622,7 +622,7 @@ func (sw *Switch) AddPersistentPeers(addrs []string) error { } // return first non-ErrNetAddressLookup error for _, err := range errs { - if _, ok := err.(types.ErrNetAddressLookup); ok { + if _, ok := err.(p2p.ErrNetAddressLookup); ok { continue } return err @@ -634,11 +634,11 @@ func (sw *Switch) AddPersistentPeers(addrs []string) error { func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { sw.Logger.Info("Adding unconditional peer ids", "ids", ids) for i, id := range ids { - err := types.NodeID(id).Validate() + err := p2p.NodeID(id).Validate() if err != nil { return fmt.Errorf("wrong ID #%d: %w", i, err) } - sw.unconditionalPeerIDs[types.NodeID(id)] = struct{}{} + sw.unconditionalPeerIDs[p2p.NodeID(id)] = struct{}{} } return nil } @@ -646,7 +646,7 @@ func (sw *Switch) AddUnconditionalPeerIDs(ids []string) error { func (sw *Switch) AddPrivatePeerIDs(ids []string) error { validIDs := make([]string, 0, len(ids)) for i, id := range ids { - err := types.NodeID(id).Validate() + err := p2p.NodeID(id).Validate() if err != nil { return fmt.Errorf("wrong ID #%d: %w", i, err) } @@ -669,7 +669,7 @@ func (sw *Switch) IsPeerPersistent(na *NetAddress) bool { func (sw *Switch) acceptRoutine() { for { - var peerNodeInfo types.NodeInfo + var peerNodeInfo p2p.NodeInfo c, err := sw.transport.Accept() if err == nil { // NOTE: The legacy MConn transport did handshaking in Accept(), @@ -803,7 +803,7 @@ func (sw *Switch) addOutboundPeerWithConfig( ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() - var peerNodeInfo types.NodeInfo + var peerNodeInfo p2p.NodeInfo c, err := sw.transport.Dial(ctx, Endpoint{ Protocol: MConnProtocol, IP: addr.IP, @@ -864,8 +864,8 @@ func (sw *Switch) addOutboundPeerWithConfig( func (sw *Switch) handshakePeer( c Connection, - expectPeerID types.NodeID, -) (types.NodeInfo, crypto.PubKey, error) { + expectPeerID p2p.NodeID, +) (p2p.NodeInfo, crypto.PubKey, error) { // Moved from transport and hardcoded until legacy P2P stack removal. ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -889,7 +889,7 @@ func (sw *Switch) handshakePeer( // For outgoing conns, ensure connection key matches dialed key. 
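// A minimal standalone sketch of the node-ID handling that
// AddUnconditionalPeerIDs and AddPrivatePeerIDs rely on above: every string
// is validated as a node ID before it is stored. The local nodeID type and
// validate helper below are hypothetical stand-ins, assuming (as the test
// fixtures in this patch suggest) that an ID is 40 hex characters (20 bytes).
package main

import (
	"encoding/hex"
	"fmt"
)

type nodeID string

// validate checks that the ID decodes to exactly 20 bytes of hex.
func (id nodeID) validate() error {
	b, err := hex.DecodeString(string(id))
	if err != nil {
		return fmt.Errorf("node ID %q is not hex: %w", id, err)
	}
	if len(b) != 20 {
		return fmt.Errorf("node ID %q has %d bytes, want 20", id, len(b))
	}
	return nil
}

// addUnconditionalPeerIDs validates every ID and collects the valid ones into
// a set, reporting the first failure with its position, like the Switch does.
func addUnconditionalPeerIDs(ids []string) (map[nodeID]struct{}, error) {
	out := make(map[nodeID]struct{}, len(ids))
	for i, id := range ids {
		if err := nodeID(id).validate(); err != nil {
			return nil, fmt.Errorf("wrong ID #%d: %w", i, err)
		}
		out[nodeID(id)] = struct{}{}
	}
	return out, nil
}

func main() {
	set, err := addUnconditionalPeerIDs([]string{
		"00112233445566778899aabbccddeeff00112233",
	})
	fmt.Println(len(set), err) // 1 <nil>
}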
if expectPeerID != "" { - peerID := types.NodeIDFromPubKey(peerKey) + peerID := p2p.NodeIDFromPubKey(peerKey) if expectPeerID != peerID { return peerInfo, peerKey, ErrRejected{ conn: c.(*mConnConnection).conn, @@ -906,7 +906,7 @@ func (sw *Switch) handshakePeer( if sw.nodeInfo.ID() == peerInfo.ID() { return peerInfo, peerKey, ErrRejected{ - addr: *types.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), + addr: *p2p.NewNetAddress(peerInfo.ID(), c.(*mConnConnection).conn.RemoteAddr()), conn: c.(*mConnConnection).conn, id: peerInfo.ID(), isSelf: true, @@ -1054,7 +1054,7 @@ func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { netAddrs := make([]*NetAddress, 0) errs := make([]error, 0) for _, addr := range addrs { - netAddr, err := types.NewNetAddressString(addr) + netAddr, err := p2p.NewNetAddressString(addr) if err != nil { errs = append(errs, err) } else { diff --git a/internal/p2p/switch_test.go b/internal/p2p/switch_test.go index 8cb755c9f..4d9a31ef1 100644 --- a/internal/p2p/switch_test.go +++ b/internal/p2p/switch_test.go @@ -24,7 +24,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) var ( @@ -39,7 +39,7 @@ func init() { } type PeerMessage struct { - PeerID types.NodeID + PeerID p2p.NodeID Bytes []byte Counter int } diff --git a/internal/p2p/test_util.go b/internal/p2p/test_util.go index b2851646d..e257ec1c1 100644 --- a/internal/p2p/test_util.go +++ b/internal/p2p/test_util.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p/conn" @@ -27,7 +27,7 @@ func CreateRandomPeer(outbound bool) Peer { addr, netAddr := CreateRoutableAddr() p := &peer{ peerConn: peerConn{outbound: outbound}, - nodeInfo: types.NodeInfo{ + nodeInfo: p2p.NodeInfo{ NodeID: netAddr.ID, ListenAddr: netAddr.DialString(), }, @@ -47,7 +47,7 @@ func CreateRoutableAddr() (addr string, netAddr *NetAddress) { mrand.Int()%256, mrand.Int()%256, mrand.Int()%256) - netAddr, err = types.NewNetAddressString(addr) + netAddr, err = p2p.NewNetAddressString(addr) if err != nil { panic(err) } @@ -170,9 +170,9 @@ func MakeSwitch( opts ...SwitchOption, ) *Switch { - nodeKey := types.GenNodeKey() + nodeKey := p2p.GenNodeKey() nodeInfo := testNodeInfo(nodeKey.ID, fmt.Sprintf("node%d", i)) - addr, err := types.NewNetAddressString( + addr, err := p2p.NewNetAddressString( nodeKey.ID.AddressString(nodeInfo.ListenAddr), ) if err != nil { @@ -227,12 +227,12 @@ func testPeerConn( //---------------------------------------------------------------- // rand node info -func testNodeInfo(id types.NodeID, name string) types.NodeInfo { +func testNodeInfo(id p2p.NodeID, name string) p2p.NodeInfo { return testNodeInfoWithNetwork(id, name, "testing") } -func testNodeInfoWithNetwork(id types.NodeID, name, network string) types.NodeInfo { - return types.NodeInfo{ +func testNodeInfoWithNetwork(id p2p.NodeID, name, network string) p2p.NodeInfo { + return p2p.NodeInfo{ ProtocolVersion: defaultProtocolVersion, NodeID: id, ListenAddr: fmt.Sprintf("127.0.0.1:%d", getFreePort()), @@ -240,7 +240,7 @@ func testNodeInfoWithNetwork(id 
types.NodeID, name, network string) types.NodeIn Version: "1.2.3-rc0-deadbeef", Channels: []byte{testCh}, Moniker: name, - Other: types.NodeInfoOther{ + Other: p2p.NodeInfoOther{ TxIndex: "on", RPCAddress: fmt.Sprintf("127.0.0.1:%d", getFreePort()), }, @@ -272,7 +272,7 @@ func (book *AddrBookMock) OurAddress(addr *NetAddress) bool { _, ok := book.OurAddrs[addr.String()] return ok } -func (book *AddrBookMock) MarkGood(types.NodeID) {} +func (book *AddrBookMock) MarkGood(p2p.NodeID) {} func (book *AddrBookMock) HasAddress(addr *NetAddress) bool { _, ok := book.Addrs[addr.String()] return ok diff --git a/internal/p2p/transport.go b/internal/p2p/transport.go index a3245dfc8..66ac8d03f 100644 --- a/internal/p2p/transport.go +++ b/internal/p2p/transport.go @@ -8,7 +8,7 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/p2p/conn" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/version" ) @@ -22,7 +22,7 @@ const ( // defaultProtocolVersion populates the Block and P2P versions using // the global values, but not the App. -var defaultProtocolVersion = types.ProtocolVersion{ +var defaultProtocolVersion = p2p.ProtocolVersion{ P2P: version.P2PProtocol, Block: version.BlockProtocol, App: 0, @@ -84,7 +84,7 @@ type Connection interface { // FIXME: The handshake should really be the Router's responsibility, but // that requires the connection interface to be byte-oriented rather than // message-oriented (see comment above). - Handshake(context.Context, types.NodeInfo, crypto.PrivKey) (types.NodeInfo, crypto.PubKey, error) + Handshake(context.Context, p2p.NodeInfo, crypto.PrivKey) (p2p.NodeInfo, crypto.PubKey, error) // ReceiveMessage returns the next message received on the connection, // blocking until one is available. Returns io.EOF if closed. @@ -155,8 +155,8 @@ type Endpoint struct { Path string } -// NewEndpoint constructs an Endpoint from a types.NetAddress structure. -func NewEndpoint(na *types.NetAddress) Endpoint { +// NewEndpoint constructs an Endpoint from a p2p.NetAddress structure. +func NewEndpoint(na *p2p.NetAddress) Endpoint { return Endpoint{ Protocol: MConnProtocol, IP: na.IP, @@ -165,7 +165,7 @@ func NewEndpoint(na *types.NetAddress) Endpoint { } // NodeAddress converts the endpoint into a NodeAddress for the given node ID. -func (e Endpoint) NodeAddress(nodeID types.NodeID) NodeAddress { +func (e Endpoint) NodeAddress(nodeID p2p.NodeID) NodeAddress { address := NodeAddress{ NodeID: nodeID, Protocol: e.Protocol, @@ -184,7 +184,7 @@ func (e Endpoint) String() string { // assume that path is a node ID (to handle opaque URLs of the form // scheme:id). if e.IP == nil { - if nodeID, err := types.NewNodeID(e.Path); err == nil { + if nodeID, err := p2p.NewNodeID(e.Path); err == nil { return e.NodeAddress(nodeID).String() } } diff --git a/internal/p2p/transport_mconn.go b/internal/p2p/transport_mconn.go index eca261476..7162a8627 100644 --- a/internal/p2p/transport_mconn.go +++ b/internal/p2p/transport_mconn.go @@ -16,8 +16,8 @@ import ( "github.com/tendermint/tendermint/internal/libs/protoio" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/p2p" p2pproto "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) const ( @@ -255,12 +255,12 @@ func newMConnConnection( // Handshake implements Connection. 
func (c *mConnConnection) Handshake( ctx context.Context, - nodeInfo types.NodeInfo, + nodeInfo p2p.NodeInfo, privKey crypto.PrivKey, -) (types.NodeInfo, crypto.PubKey, error) { +) (p2p.NodeInfo, crypto.PubKey, error) { var ( mconn *conn.MConnection - peerInfo types.NodeInfo + peerInfo p2p.NodeInfo peerKey crypto.PubKey errCh = make(chan error, 1) ) @@ -283,16 +283,16 @@ func (c *mConnConnection) Handshake( select { case <-ctx.Done(): _ = c.Close() - return types.NodeInfo{}, nil, ctx.Err() + return p2p.NodeInfo{}, nil, ctx.Err() case err := <-errCh: if err != nil { - return types.NodeInfo{}, nil, err + return p2p.NodeInfo{}, nil, err } c.mconn = mconn c.logger = mconn.Logger if err = c.mconn.Start(); err != nil { - return types.NodeInfo{}, nil, err + return p2p.NodeInfo{}, nil, err } return peerInfo, peerKey, nil } @@ -303,16 +303,16 @@ func (c *mConnConnection) Handshake( // unstarted but handshaked MConnection, to avoid concurrent field writes. func (c *mConnConnection) handshake( ctx context.Context, - nodeInfo types.NodeInfo, + nodeInfo p2p.NodeInfo, privKey crypto.PrivKey, -) (*conn.MConnection, types.NodeInfo, crypto.PubKey, error) { +) (*conn.MConnection, p2p.NodeInfo, crypto.PubKey, error) { if c.mconn != nil { - return nil, types.NodeInfo{}, nil, errors.New("connection is already handshaked") + return nil, p2p.NodeInfo{}, nil, errors.New("connection is already handshaked") } secretConn, err := conn.MakeSecretConnection(c.conn, privKey) if err != nil { - return nil, types.NodeInfo{}, nil, err + return nil, p2p.NodeInfo{}, nil, err } var pbPeerInfo p2pproto.NodeInfo @@ -322,17 +322,17 @@ func (c *mConnConnection) handshake( errCh <- err }() go func() { - _, err := protoio.NewDelimitedReader(secretConn, types.MaxNodeInfoSize()).ReadMsg(&pbPeerInfo) + _, err := protoio.NewDelimitedReader(secretConn, p2p.MaxNodeInfoSize()).ReadMsg(&pbPeerInfo) errCh <- err }() for i := 0; i < cap(errCh); i++ { if err = <-errCh; err != nil { - return nil, types.NodeInfo{}, nil, err + return nil, p2p.NodeInfo{}, nil, err } } - peerInfo, err := types.NodeInfoFromProto(&pbPeerInfo) + peerInfo, err := p2p.NodeInfoFromProto(&pbPeerInfo) if err != nil { - return nil, types.NodeInfo{}, nil, err + return nil, p2p.NodeInfo{}, nil, err } mconn := conn.NewMConnectionWithConfig( diff --git a/internal/p2p/transport_memory.go b/internal/p2p/transport_memory.go index 09a387254..c6a9fb66e 100644 --- a/internal/p2p/transport_memory.go +++ b/internal/p2p/transport_memory.go @@ -12,7 +12,7 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p/conn" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) const ( @@ -28,7 +28,7 @@ type MemoryNetwork struct { logger log.Logger mtx sync.RWMutex - transports map[types.NodeID]*MemoryTransport + transports map[p2p.NodeID]*MemoryTransport bufferSize int } @@ -37,14 +37,14 @@ func NewMemoryNetwork(logger log.Logger, bufferSize int) *MemoryNetwork { return &MemoryNetwork{ bufferSize: bufferSize, logger: logger, - transports: map[types.NodeID]*MemoryTransport{}, + transports: map[p2p.NodeID]*MemoryTransport{}, } } // CreateTransport creates a new memory transport endpoint with the given node // ID and immediately begins listening on the address "memory:". It panics // if the node ID is already in use (which is fine, since this is for tests). 
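// A minimal standalone sketch of the pattern used by the mconn handshake
// above: send our info and read the peer's concurrently, funnel both results
// through a buffered error channel, and only proceed once every goroutine has
// reported. Plain byte payloads over net.Pipe stand in for the NodeInfo
// protobufs; the helper names are hypothetical.
package main

import (
	"fmt"
	"io"
	"net"
)

// exchange writes ours and reads theirs at the same time, draining one error
// per goroutine (cap(errCh) of them) before trusting the result.
func exchange(conn net.Conn, ours [4]byte) ([4]byte, error) {
	var theirs [4]byte
	errCh := make(chan error, 2)
	go func() {
		_, err := conn.Write(ours[:])
		errCh <- err
	}()
	go func() {
		_, err := io.ReadFull(conn, theirs[:])
		errCh <- err
	}()
	for i := 0; i < cap(errCh); i++ {
		if err := <-errCh; err != nil {
			return [4]byte{}, err
		}
	}
	return theirs, nil
}

func main() {
	a, b := net.Pipe()
	defer a.Close()
	defer b.Close()
	go func() {
		_, _ = exchange(b, [4]byte{'n', 'o', 'd', 'e'}) // the remote side
	}()
	got, err := exchange(a, [4]byte{'p', 'e', 'e', 'r'})
	fmt.Println(string(got[:]), err) // node <nil>
}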
-func (n *MemoryNetwork) CreateTransport(nodeID types.NodeID) *MemoryTransport { +func (n *MemoryNetwork) CreateTransport(nodeID p2p.NodeID) *MemoryTransport { t := newMemoryTransport(n, nodeID) n.mtx.Lock() @@ -57,14 +57,14 @@ func (n *MemoryNetwork) CreateTransport(nodeID types.NodeID) *MemoryTransport { } // GetTransport looks up a transport in the network, returning nil if not found. -func (n *MemoryNetwork) GetTransport(id types.NodeID) *MemoryTransport { +func (n *MemoryNetwork) GetTransport(id p2p.NodeID) *MemoryTransport { n.mtx.RLock() defer n.mtx.RUnlock() return n.transports[id] } // RemoveTransport removes a transport from the network and closes it. -func (n *MemoryNetwork) RemoveTransport(id types.NodeID) { +func (n *MemoryNetwork) RemoveTransport(id p2p.NodeID) { n.mtx.Lock() t, ok := n.transports[id] delete(n.transports, id) @@ -92,7 +92,7 @@ func (n *MemoryNetwork) Size() int { type MemoryTransport struct { logger log.Logger network *MemoryNetwork - nodeID types.NodeID + nodeID p2p.NodeID bufferSize int acceptCh chan *MemoryConnection @@ -102,7 +102,7 @@ type MemoryTransport struct { // newMemoryTransport creates a new MemoryTransport. This is for internal use by // MemoryNetwork, use MemoryNetwork.CreateTransport() instead. -func newMemoryTransport(network *MemoryNetwork, nodeID types.NodeID) *MemoryTransport { +func newMemoryTransport(network *MemoryNetwork, nodeID p2p.NodeID) *MemoryTransport { return &MemoryTransport{ logger: network.logger.With("local", nodeID), network: network, @@ -163,7 +163,7 @@ func (t *MemoryTransport) Dial(ctx context.Context, endpoint Endpoint) (Connecti return nil, err } - nodeID, err := types.NewNodeID(endpoint.Path) + nodeID, err := p2p.NewNodeID(endpoint.Path) if err != nil { return nil, err } @@ -204,8 +204,8 @@ func (t *MemoryTransport) Close() error { // MemoryConnection is an in-memory connection between two transport endpoints. type MemoryConnection struct { logger log.Logger - localID types.NodeID - remoteID types.NodeID + localID p2p.NodeID + remoteID p2p.NodeID receiveCh <-chan memoryMessage sendCh chan<- memoryMessage @@ -218,15 +218,15 @@ type memoryMessage struct { message []byte // For handshakes. - nodeInfo *types.NodeInfo + nodeInfo *p2p.NodeInfo pubKey crypto.PubKey } // newMemoryConnection creates a new MemoryConnection. func newMemoryConnection( logger log.Logger, - localID types.NodeID, - remoteID types.NodeID, + localID p2p.NodeID, + remoteID p2p.NodeID, receiveCh <-chan memoryMessage, sendCh chan<- memoryMessage, closer *tmsync.Closer, @@ -270,29 +270,29 @@ func (c *MemoryConnection) Status() conn.ConnectionStatus { // Handshake implements Connection. 
func (c *MemoryConnection) Handshake( ctx context.Context, - nodeInfo types.NodeInfo, + nodeInfo p2p.NodeInfo, privKey crypto.PrivKey, -) (types.NodeInfo, crypto.PubKey, error) { +) (p2p.NodeInfo, crypto.PubKey, error) { select { case c.sendCh <- memoryMessage{nodeInfo: &nodeInfo, pubKey: privKey.PubKey()}: c.logger.Debug("sent handshake", "nodeInfo", nodeInfo) case <-c.closer.Done(): - return types.NodeInfo{}, nil, io.EOF + return p2p.NodeInfo{}, nil, io.EOF case <-ctx.Done(): - return types.NodeInfo{}, nil, ctx.Err() + return p2p.NodeInfo{}, nil, ctx.Err() } select { case msg := <-c.receiveCh: if msg.nodeInfo == nil { - return types.NodeInfo{}, nil, errors.New("no NodeInfo in handshake") + return p2p.NodeInfo{}, nil, errors.New("no NodeInfo in handshake") } c.logger.Debug("received handshake", "peerInfo", msg.nodeInfo) return *msg.nodeInfo, msg.pubKey, nil case <-c.closer.Done(): - return types.NodeInfo{}, nil, io.EOF + return p2p.NodeInfo{}, nil, io.EOF case <-ctx.Done(): - return types.NodeInfo{}, nil, ctx.Err() + return p2p.NodeInfo{}, nil, ctx.Err() } } diff --git a/internal/p2p/transport_memory_test.go b/internal/p2p/transport_memory_test.go index c4eea65c3..27c9ce78c 100644 --- a/internal/p2p/transport_memory_test.go +++ b/internal/p2p/transport_memory_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // Transports are mainly tested by common tests in transport_test.go, we @@ -21,7 +21,7 @@ func init() { network = p2p.NewMemoryNetwork(log.TestingLogger(), 1) } i := byte(network.Size()) - nodeID, err := types.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20))) + nodeID, err := p2ptypes.NewNodeID(hex.EncodeToString(bytes.Repeat([]byte{i<<4 + i}, 20))) require.NoError(t, err) transport := network.CreateTransport(nodeID) diff --git a/internal/p2p/transport_test.go b/internal/p2p/transport_test.go index 1b8ab77f5..ccd50188a 100644 --- a/internal/p2p/transport_test.go +++ b/internal/p2p/transport_test.go @@ -14,7 +14,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" - "github.com/tendermint/tendermint/types" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ) // transportFactory is used to set up transports for tests. @@ -242,9 +242,9 @@ func TestConnection_Handshake(t *testing.T) { // A handshake should pass the given keys and NodeInfo. 
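// A minimal standalone sketch of the channel-based handshake above: push our
// half out unless the connection closes or the context ends, then wait for
// the peer's half under the same conditions, returning io.EOF on close and
// ctx.Err() on cancellation. handshakeMsg is a hypothetical stand-in for the
// NodeInfo and public key carried by memoryMessage.
package main

import (
	"context"
	"errors"
	"fmt"
	"io"
)

type handshakeMsg struct {
	nodeID string
}

func handshake(
	ctx context.Context,
	sendCh chan<- handshakeMsg,
	receiveCh <-chan handshakeMsg,
	closed <-chan struct{},
	ours handshakeMsg,
) (handshakeMsg, error) {
	// Send our half of the handshake, or bail out on close/cancel.
	select {
	case sendCh <- ours:
	case <-closed:
		return handshakeMsg{}, io.EOF
	case <-ctx.Done():
		return handshakeMsg{}, ctx.Err()
	}
	// Wait for the peer's half under the same conditions.
	select {
	case msg := <-receiveCh:
		if msg.nodeID == "" {
			return handshakeMsg{}, errors.New("no node info in handshake")
		}
		return msg, nil
	case <-closed:
		return handshakeMsg{}, io.EOF
	case <-ctx.Done():
		return handshakeMsg{}, ctx.Err()
	}
}

func main() {
	aToB := make(chan handshakeMsg, 1) // buffer of 1, like the memory network tests
	bToA := make(chan handshakeMsg, 1)
	closed := make(chan struct{})
	ctx := context.Background()

	go func() { _, _ = handshake(ctx, bToA, aToB, closed, handshakeMsg{nodeID: "bb"}) }()
	peer, err := handshake(ctx, aToB, bToA, closed, handshakeMsg{nodeID: "aa"})
	fmt.Println(peer.nodeID, err) // bb <nil>
}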
aKey := ed25519.GenPrivKey() - aInfo := types.NodeInfo{ - NodeID: types.NodeIDFromPubKey(aKey.PubKey()), - ProtocolVersion: types.ProtocolVersion{ + aInfo := p2ptypes.NodeInfo{ + NodeID: p2ptypes.NodeIDFromPubKey(aKey.PubKey()), + ProtocolVersion: p2ptypes.ProtocolVersion{ P2P: 1, Block: 2, App: 3, @@ -254,13 +254,13 @@ func TestConnection_Handshake(t *testing.T) { Version: "1.2.3", Channels: bytes.HexBytes([]byte{0xf0, 0x0f}), Moniker: "moniker", - Other: types.NodeInfoOther{ + Other: p2ptypes.NodeInfoOther{ TxIndex: "txindex", RPCAddress: "rpc.domain.com", }, } bKey := ed25519.GenPrivKey() - bInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(bKey.PubKey())} + bInfo := p2ptypes.NodeInfo{NodeID: p2ptypes.NodeIDFromPubKey(bKey.PubKey())} errCh := make(chan error, 1) go func() { @@ -291,7 +291,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba := dialAccept(t, a, b) timeoutCtx, cancel := context.WithTimeout(ctx, 1*time.Minute) cancel() - _, _, err := ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err := ab.Handshake(timeoutCtx, p2ptypes.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.Canceled, err) _ = ab.Close() @@ -301,7 +301,7 @@ func TestConnection_HandshakeCancel(t *testing.T) { ab, ba = dialAccept(t, a, b) timeoutCtx, cancel = context.WithTimeout(ctx, 200*time.Millisecond) defer cancel() - _, _, err = ab.Handshake(timeoutCtx, types.NodeInfo{}, ed25519.GenPrivKey()) + _, _, err = ab.Handshake(timeoutCtx, p2ptypes.NodeInfo{}, ed25519.GenPrivKey()) require.Error(t, err) require.Equal(t, context.DeadlineExceeded, err) _ = ab.Close() @@ -448,7 +448,7 @@ func TestEndpoint_NodeAddress(t *testing.T) { ip4 = []byte{1, 2, 3, 4} ip4in6 = net.IPv4(1, 2, 3, 4) ip6 = []byte{0xb1, 0x0c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01} - id = types.NodeID("00112233445566778899aabbccddeeff00112233") + id = p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233") ) testcases := []struct { @@ -503,7 +503,7 @@ func TestEndpoint_String(t *testing.T) { ip4 = []byte{1, 2, 3, 4} ip4in6 = net.IPv4(1, 2, 3, 4) ip6 = []byte{0xb1, 0x0c, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x01} - nodeID = types.NodeID("00112233445566778899aabbccddeeff00112233") + nodeID = p2ptypes.NodeID("00112233445566778899aabbccddeeff00112233") ) testcases := []struct { @@ -629,13 +629,13 @@ func dialAcceptHandshake(t *testing.T, a, b p2p.Transport) (p2p.Connection, p2p. 
errCh := make(chan error, 1) go func() { privKey := ed25519.GenPrivKey() - nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} + nodeInfo := p2ptypes.NodeInfo{NodeID: p2ptypes.NodeIDFromPubKey(privKey.PubKey())} _, _, err := ba.Handshake(ctx, nodeInfo, privKey) errCh <- err }() privKey := ed25519.GenPrivKey() - nodeInfo := types.NodeInfo{NodeID: types.NodeIDFromPubKey(privKey.PubKey())} + nodeInfo := p2ptypes.NodeInfo{NodeID: p2ptypes.NodeIDFromPubKey(privKey.PubKey())} _, _, err := ab.Handshake(ctx, nodeInfo, privKey) require.NoError(t, err) diff --git a/internal/statesync/block_queue.go b/internal/statesync/block_queue.go index 56ed3c376..bfbf7a10f 100644 --- a/internal/statesync/block_queue.go +++ b/internal/statesync/block_queue.go @@ -6,12 +6,13 @@ import ( "sync" "time" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/p2p" ) type lightBlockResponse struct { - block *types.LightBlock - peer types.NodeID + block *light.LightBlock + peer p2p.NodeID } // a block queue is used for asynchronously fetching and verifying light blocks @@ -26,7 +27,7 @@ type blockQueue struct { initialHeight int64 stopHeight int64 stopTime time.Time - terminal *types.LightBlock + terminal *light.LightBlock // track failed heights so we know what blocks to try fetch again failed *maxIntHeap diff --git a/internal/statesync/block_queue_test.go b/internal/statesync/block_queue_test.go index 3a4c71e4e..cb8595f4b 100644 --- a/internal/statesync/block_queue_test.go +++ b/internal/statesync/block_queue_test.go @@ -10,7 +10,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/test/factory" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) var ( @@ -22,7 +22,7 @@ var ( ) func TestBlockQueueBasic(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) @@ -69,7 +69,7 @@ loop: // Test with spurious failures and retries func TestBlockQueueWithFailures(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 200) @@ -119,7 +119,7 @@ func TestBlockQueueWithFailures(t *testing.T) { // Test that when all the blocks are retrieved that the queue still holds on to // it's workers and in the event of failure can still fetch the failed block func TestBlockQueueBlocks(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 2) expectedHeight := startHeight @@ -166,7 +166,7 @@ loop: } func TestBlockQueueAcceptsNoMoreBlocks(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) defer queue.close() @@ -191,7 +191,7 @@ loop: // Test a scenario where more blocks are needed then just the stopheight because // we haven't found a block with a small enough time. 
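// A minimal standalone sketch of the maxIntHeap named in the blockQueue above
// (its definition is outside this patch): a max-heap of heights built on
// container/heap, which presumably lets the queue retry the highest
// outstanding failed height first while backfilling downwards. The names and
// the retry order are assumptions.
package main

import (
	"container/heap"
	"fmt"
)

// maxIntHeap is a max-heap of block heights.
type maxIntHeap []int64

func (h maxIntHeap) Len() int            { return len(h) }
func (h maxIntHeap) Less(i, j int) bool  { return h[i] > h[j] } // ">" makes it a max-heap
func (h maxIntHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxIntHeap) Push(x interface{}) { *h = append(*h, x.(int64)) }
func (h *maxIntHeap) Pop() interface{} {
	old := *h
	n := len(old)
	x := old[n-1]
	*h = old[:n-1]
	return x
}

func main() {
	failed := &maxIntHeap{}
	heap.Init(failed)
	for _, height := range []int64{102, 110, 97} {
		heap.Push(failed, height) // record a failed fetch
	}
	fmt.Println(heap.Pop(failed).(int64)) // 110: retry the highest height first
}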
func TestBlockQueueStopTime(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) queue := newBlockQueue(startHeight, stopHeight, 1, stopTime, 1) @@ -234,7 +234,7 @@ func TestBlockQueueStopTime(t *testing.T) { } func TestBlockQueueInitialHeight(t *testing.T) { - peerID, err := types.NewNodeID("0011223344556677889900112233445566778899") + peerID, err := p2p.NewNodeID("0011223344556677889900112233445566778899") require.NoError(t, err) const initialHeight int64 = 120 @@ -273,7 +273,7 @@ loop: } } -func mockLBResp(t *testing.T, peer types.NodeID, height int64, time time.Time) lightBlockResponse { +func mockLBResp(t *testing.T, peer p2p.NodeID, height int64, time time.Time) lightBlockResponse { return lightBlockResponse{ block: mockLB(t, height, time, factory.MakeBlockID()), peer: peer, diff --git a/internal/statesync/chunks.go b/internal/statesync/chunks.go index 590f128da..73300f9b8 100644 --- a/internal/statesync/chunks.go +++ b/internal/statesync/chunks.go @@ -10,7 +10,7 @@ import ( "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // errDone is returned by chunkQueue.Next() when all chunks have been returned. @@ -22,7 +22,7 @@ type chunk struct { Format uint32 Index uint32 Chunk []byte - Sender types.NodeID + Sender p2p.NodeID } // chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an @@ -33,7 +33,7 @@ type chunkQueue struct { snapshot *snapshot // if this is nil, the queue has been closed dir string // temp dir for on-disk chunk storage chunkFiles map[uint32]string // path to temporary chunk file - chunkSenders map[uint32]types.NodeID // the peer who sent the given chunk + chunkSenders map[uint32]p2p.NodeID // the peer who sent the given chunk chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() chunkReturned map[uint32]bool // chunks returned via Next() waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival @@ -54,7 +54,7 @@ func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { snapshot: snapshot, dir: dir, chunkFiles: make(map[uint32]string, snapshot.Chunks), - chunkSenders: make(map[uint32]types.NodeID, snapshot.Chunks), + chunkSenders: make(map[uint32]p2p.NodeID, snapshot.Chunks), chunkAllocated: make(map[uint32]bool, snapshot.Chunks), chunkReturned: make(map[uint32]bool, snapshot.Chunks), waiters: make(map[uint32][]chan<- uint32), @@ -188,7 +188,7 @@ func (q *chunkQueue) discard(index uint32) error { // DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to // discard already returned chunks, this can be done via Discard(). -func (q *chunkQueue) DiscardSender(peerID types.NodeID) error { +func (q *chunkQueue) DiscardSender(peerID p2p.NodeID) error { q.Lock() defer q.Unlock() @@ -208,7 +208,7 @@ func (q *chunkQueue) DiscardSender(peerID types.NodeID) error { // GetSender returns the sender of the chunk with the given index, or empty if // not found. 
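// A minimal standalone sketch of the sender bookkeeping that DiscardSender
// and GetSender implement above, reduced to two in-memory maps (the real
// chunkQueue also stores chunks on disk, takes a lock, and signals waiters).
// The senderTracker type and its method names are hypothetical.
package main

import "fmt"

type nodeID string

type senderTracker struct {
	chunkSenders  map[uint32]nodeID // the peer that sent each chunk index
	chunkReturned map[uint32]bool   // chunks already handed to the caller
}

func newSenderTracker() *senderTracker {
	return &senderTracker{
		chunkSenders:  make(map[uint32]nodeID),
		chunkReturned: make(map[uint32]bool),
	}
}

func (t *senderTracker) add(index uint32, sender nodeID) { t.chunkSenders[index] = sender }

func (t *senderTracker) markReturned(index uint32) { t.chunkReturned[index] = true }

// discardSender forgets every *unreturned* chunk supplied by the given peer;
// already-returned chunks are kept, matching the doc comment above.
func (t *senderTracker) discardSender(peerID nodeID) []uint32 {
	var discarded []uint32
	for index, sender := range t.chunkSenders {
		if sender == peerID && !t.chunkReturned[index] {
			delete(t.chunkSenders, index)
			discarded = append(discarded, index)
		}
	}
	return discarded
}

// getSender returns the sender for an index, or "" if unknown.
func (t *senderTracker) getSender(index uint32) nodeID { return t.chunkSenders[index] }

func main() {
	t := newSenderTracker()
	t.add(0, "a")
	t.add(1, "b")
	t.add(4, "b")
	t.markReturned(1)
	fmt.Println(t.discardSender("b")) // [4]: chunk 1 was already returned
	fmt.Println(t.getSender(0))       // a
}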
-func (q *chunkQueue) GetSender(index uint32) types.NodeID { +func (q *chunkQueue) GetSender(index uint32) p2p.NodeID { q.Lock() defer q.Unlock() return q.chunkSenders[index] diff --git a/internal/statesync/chunks_test.go b/internal/statesync/chunks_test.go index ad7f19b3b..7a0588fe9 100644 --- a/internal/statesync/chunks_test.go +++ b/internal/statesync/chunks_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { @@ -274,7 +274,7 @@ func TestChunkQueue_DiscardSender(t *testing.T) { defer teardown() // Allocate and add all chunks to the queue - senders := []types.NodeID{types.NodeID("a"), types.NodeID("b"), types.NodeID("c")} + senders := []p2p.NodeID{p2p.NodeID("a"), p2p.NodeID("b"), p2p.NodeID("c")} for i := uint32(0); i < queue.Size(); i++ { _, err := queue.Allocate() require.NoError(t, err) @@ -295,14 +295,14 @@ func TestChunkQueue_DiscardSender(t *testing.T) { } // Discarding an unknown sender should do nothing - err := queue.DiscardSender(types.NodeID("x")) + err := queue.DiscardSender(p2p.NodeID("x")) require.NoError(t, err) _, err = queue.Allocate() assert.Equal(t, errDone, err) // Discarding sender b should discard chunk 4, but not chunk 1 which has already been // returned. - err = queue.DiscardSender(types.NodeID("b")) + err = queue.DiscardSender(p2p.NodeID("b")) require.NoError(t, err) index, err := queue.Allocate() require.NoError(t, err) @@ -315,8 +315,8 @@ func TestChunkQueue_GetSender(t *testing.T) { queue, teardown := setupChunkQueue(t) defer teardown() - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: peerAID}) require.NoError(t, err) @@ -354,7 +354,7 @@ func TestChunkQueue_Next(t *testing.T) { }() assert.Empty(t, chNext) - _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")}) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.NodeID("b")}) require.NoError(t, err) select { case <-chNext: @@ -362,17 +362,17 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.NodeID("a")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: types.NodeID("a")}, + &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.NodeID("a")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: types.NodeID("b")}, + &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.NodeID("b")}, <-chNext) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.NodeID("e")}) require.NoError(t, err) select { case <-chNext: @@ -380,19 +380,19 @@ func TestChunkQueue_Next(t *testing.T) { default: } - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 
2, Chunk: []byte{3, 1, 2}, Sender: p2p.NodeID("c")}) require.NoError(t, err) - _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")}) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.NodeID("d")}) require.NoError(t, err) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: types.NodeID("c")}, + &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.NodeID("c")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: types.NodeID("d")}, + &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.NodeID("d")}, <-chNext) assert.Equal(t, - &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: types.NodeID("e")}, + &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.NodeID("e")}, <-chNext) _, ok := <-chNext diff --git a/internal/statesync/dispatcher.go b/internal/statesync/dispatcher.go index 394b77e38..93b9774a9 100644 --- a/internal/statesync/dispatcher.go +++ b/internal/statesync/dispatcher.go @@ -9,9 +9,11 @@ import ( "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/light" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" proto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) var ( @@ -31,7 +33,7 @@ type dispatcher struct { timeout time.Duration mtx sync.Mutex - calls map[types.NodeID]chan *types.LightBlock + calls map[p2ptypes.NodeID]chan *light.LightBlock running bool } @@ -40,14 +42,14 @@ func newDispatcher(requestCh chan<- p2p.Envelope, timeout time.Duration) *dispat availablePeers: newPeerList(), timeout: timeout, requestCh: requestCh, - calls: make(map[types.NodeID]chan *types.LightBlock), + calls: make(map[p2ptypes.NodeID]chan *light.LightBlock), running: true, } } // LightBlock uses the request channel to fetch a light block from the next peer // in a list, tracks the call and waits for the reactor to pass along the response -func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*types.LightBlock, types.NodeID, error) { +func (d *dispatcher) LightBlock(ctx context.Context, height int64) (*light.LightBlock, p2ptypes.NodeID, error) { d.mtx.Lock() // check to see that the dispatcher is connected to at least one peer if d.availablePeers.Len() == 0 && len(d.calls) == 0 { @@ -98,7 +100,7 @@ func (d *dispatcher) start() { d.running = true } -func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer types.NodeID) (*types.LightBlock, error) { +func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer p2ptypes.NodeID) (*light.LightBlock, error) { // dispatch the request to the peer callCh, err := d.dispatch(peer, height) if err != nil { @@ -122,7 +124,7 @@ func (d *dispatcher) lightBlock(ctx context.Context, height int64, peer types.No // respond allows the underlying process which receives requests on the // requestCh to respond with the respective light block -func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error { +func (d *dispatcher) respond(lb *proto.LightBlock, peer p2ptypes.NodeID) error { d.mtx.Lock() defer d.mtx.Unlock() @@ -142,7 +144,7 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer 
types.NodeID) error { return nil } - block, err := types.LightBlockFromProto(lb) + block, err := light.LightBlockFromProto(lb) if err != nil { fmt.Println("error with converting light block") return err @@ -152,11 +154,11 @@ func (d *dispatcher) respond(lb *proto.LightBlock, peer types.NodeID) error { return nil } -func (d *dispatcher) addPeer(peer types.NodeID) { +func (d *dispatcher) addPeer(peer p2ptypes.NodeID) { d.availablePeers.Append(peer) } -func (d *dispatcher) removePeer(peer types.NodeID) { +func (d *dispatcher) removePeer(peer p2ptypes.NodeID) { d.mtx.Lock() defer d.mtx.Unlock() if _, ok := d.calls[peer]; ok { @@ -168,10 +170,10 @@ func (d *dispatcher) removePeer(peer types.NodeID) { // dispatch takes a peer and allocates it a channel so long as it's not already // busy and the receiving channel is still running. It then dispatches the message -func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.LightBlock, error) { +func (d *dispatcher) dispatch(peer p2ptypes.NodeID, height int64) (chan *light.LightBlock, error) { d.mtx.Lock() defer d.mtx.Unlock() - ch := make(chan *types.LightBlock, 1) + ch := make(chan *light.LightBlock, 1) // check if the dispatcher is running or not if !d.running { @@ -198,7 +200,7 @@ func (d *dispatcher) dispatch(peer types.NodeID, height int64) (chan *types.Ligh // release appends the peer back to the list and deletes the allocated call so // that a new call can be made to that peer -func (d *dispatcher) release(peer types.NodeID) { +func (d *dispatcher) release(peer p2ptypes.NodeID) { d.mtx.Lock() defer d.mtx.Unlock() if call, ok := d.calls[peer]; ok { @@ -216,13 +218,13 @@ func (d *dispatcher) release(peer types.NodeID) { // TODO: This should probably be moved over to the light package but as we're // not yet officially supporting p2p light clients we'll leave this here for now. type blockProvider struct { - peer types.NodeID + peer p2ptypes.NodeID chainID string timeout time.Duration dispatcher *dispatcher } -func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { +func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*light.LightBlock, error) { // FIXME: The provider doesn't know if the dispatcher is still connected to // that peer. If the connection is dropped for whatever reason the // dispatcher needs to be able to relay this back to the provider so it can @@ -245,7 +247,7 @@ func (p *blockProvider) LightBlock(ctx context.Context, height int64) (*types.Li // attacks. 
This is a no op as there currently isn't a way to wire this up to // the evidence reactor (we should endeavor to do this in the future but for now // it's not critical for backwards verification) -func (p *blockProvider) ReportEvidence(ctx context.Context, ev types.Evidence) error { +func (p *blockProvider) ReportEvidence(ctx context.Context, ev evidence.Evidence) error { return nil } @@ -258,14 +260,14 @@ func (p *blockProvider) String() string { return string(p.peer) } // retrieving blocks over all the peers the reactor is connected to type peerlist struct { mtx sync.Mutex - peers []types.NodeID - waiting []chan types.NodeID + peers []p2ptypes.NodeID + waiting []chan p2ptypes.NodeID } func newPeerList() *peerlist { return &peerlist{ - peers: make([]types.NodeID, 0), - waiting: make([]chan types.NodeID, 0), + peers: make([]p2ptypes.NodeID, 0), + waiting: make([]chan p2ptypes.NodeID, 0), } } @@ -275,12 +277,12 @@ func (l *peerlist) Len() int { return len(l.peers) } -func (l *peerlist) Pop(ctx context.Context) types.NodeID { +func (l *peerlist) Pop(ctx context.Context) p2ptypes.NodeID { l.mtx.Lock() if len(l.peers) == 0 { // if we don't have any peers in the list we block until a peer is // appended - wait := make(chan types.NodeID, 1) + wait := make(chan p2ptypes.NodeID, 1) l.waiting = append(l.waiting, wait) // unlock whilst waiting so that the list can be appended to l.mtx.Unlock() @@ -299,7 +301,7 @@ func (l *peerlist) Pop(ctx context.Context) types.NodeID { return peer } -func (l *peerlist) Append(peer types.NodeID) { +func (l *peerlist) Append(peer p2ptypes.NodeID) { l.mtx.Lock() defer l.mtx.Unlock() if len(l.waiting) > 0 { @@ -312,7 +314,7 @@ func (l *peerlist) Append(peer types.NodeID) { } } -func (l *peerlist) Remove(peer types.NodeID) { +func (l *peerlist) Remove(peer p2ptypes.NodeID) { l.mtx.Lock() defer l.mtx.Unlock() for i, p := range l.peers { @@ -323,7 +325,7 @@ func (l *peerlist) Remove(peer types.NodeID) { } } -func (l *peerlist) Peers() []types.NodeID { +func (l *peerlist) Peers() []p2ptypes.NodeID { l.mtx.Lock() defer l.mtx.Unlock() return l.peers diff --git a/internal/statesync/dispatcher_test.go b/internal/statesync/dispatcher_test.go index 469630894..79aa48cb4 100644 --- a/internal/statesync/dispatcher_test.go +++ b/internal/statesync/dispatcher_test.go @@ -13,8 +13,9 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/p2p" + "github.com/tendermint/tendermint/pkg/light" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" - "github.com/tendermint/tendermint/types" ) func TestDispatcherBasic(t *testing.T) { @@ -112,7 +113,7 @@ func TestDispatcherReturnsBlockOncePeerAvailable(t *testing.T) { go func() { <-dispatcherRequestCh - lb := &types.LightBlock{} + lb := &light.LightBlock{} asProto, err := lb.ToProto() require.Nil(t, err) err = d.respond(asProto, peerFromSet) @@ -178,7 +179,7 @@ func TestPeerListBasic(t *testing.T) { } assert.Equal(t, half, peerList.Len()) - peerList.Remove(types.NodeID("lp")) + peerList.Remove(p2ptypes.NodeID("lp")) assert.Equal(t, half, peerList.Len()) peerList.Remove(peerSet[half]) @@ -295,10 +296,10 @@ func handleRequests(t *testing.T, d *dispatcher, ch chan p2p.Envelope, closeCh c } } -func createPeerSet(num int) []types.NodeID { - peers := make([]types.NodeID, num) +func createPeerSet(num int) []p2ptypes.NodeID { + peers := make([]p2ptypes.NodeID, num) for i := 0; i < num; i++ { - peers[i], _ = 
types.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*types.NodeIDByteLength)) + peers[i], _ = p2ptypes.NewNodeID(strings.Repeat(fmt.Sprintf("%d", i), 2*p2ptypes.NodeIDByteLength)) } return peers } diff --git a/internal/statesync/mocks/state_provider.go b/internal/statesync/mocks/state_provider.go index 538c619fc..210dc35ac 100644 --- a/internal/statesync/mocks/state_provider.go +++ b/internal/statesync/mocks/state_provider.go @@ -6,9 +6,9 @@ import ( context "context" mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + metadata "github.com/tendermint/tendermint/pkg/metadata" - types "github.com/tendermint/tendermint/types" + state "github.com/tendermint/tendermint/state" ) // StateProvider is an autogenerated mock type for the StateProvider type @@ -40,15 +40,15 @@ func (_m *StateProvider) AppHash(ctx context.Context, height uint64) ([]byte, er } // Commit provides a mock function with given fields: ctx, height -func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { +func (_m *StateProvider) Commit(ctx context.Context, height uint64) (*metadata.Commit, error) { ret := _m.Called(ctx, height) - var r0 *types.Commit - if rf, ok := ret.Get(0).(func(context.Context, uint64) *types.Commit); ok { + var r0 *metadata.Commit + if rf, ok := ret.Get(0).(func(context.Context, uint64) *metadata.Commit); ok { r0 = rf(ctx, height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Commit) + r0 = ret.Get(0).(*metadata.Commit) } } diff --git a/internal/statesync/reactor.go b/internal/statesync/reactor.go index 59cbabd14..1b393e7db 100644 --- a/internal/statesync/reactor.go +++ b/internal/statesync/reactor.go @@ -10,17 +10,19 @@ import ( "sort" "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) var ( @@ -304,7 +306,7 @@ func (r *Reactor) backfill( ctx context.Context, chainID string, startHeight, stopHeight, initialHeight int64, - trustedBlockID types.BlockID, + trustedBlockID metadata.BlockID, stopTime time.Time, ) error { r.Logger.Info("starting backfill process...", "startHeight", startHeight, @@ -312,7 +314,7 @@ func (r *Reactor) backfill( const sleepTime = 1 * time.Second var ( - lastValidatorSet *types.ValidatorSet + lastValidatorSet *consensus.ValidatorSet lastChangeHeight int64 = startHeight ) @@ -811,7 +813,7 @@ func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) { // fetchLightBlock works out whether the node has a light block at a particular // height and if so returns it so it can be gossiped to peers -func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { +func (r *Reactor) fetchLightBlock(height uint64) (*light.LightBlock, error) { h := int64(height) blockMeta := r.blockStore.LoadBlockMeta(h) @@ -832,8 +834,8 @@ func (r *Reactor) fetchLightBlock(height uint64) (*types.LightBlock, error) { return nil, 
nil } - return &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + return &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: &blockMeta.Header, Commit: commit, }, diff --git a/internal/statesync/reactor_test.go b/internal/statesync/reactor_test.go index 9bff72679..d394c557b 100644 --- a/internal/statesync/reactor_test.go +++ b/internal/statesync/reactor_test.go @@ -12,19 +12,22 @@ import ( "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" proxymocks "github.com/tendermint/tendermint/proxy/mocks" smmocks "github.com/tendermint/tendermint/state/mocks" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) type reactorTestSuite struct { @@ -166,7 +169,7 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { rts := setup(t, nil, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ - From: types.NodeID("aa"), + From: p2ptypes.NodeID("aa"), Message: &ssproto.SnapshotsRequest{}, } @@ -174,7 +177,7 @@ func TestReactor_ChunkRequest_InvalidRequest(t *testing.T) { require.Error(t, response.Err) require.Empty(t, rts.chunkOutCh) require.Contains(t, response.Err.Error(), "received unknown message") - require.Equal(t, types.NodeID("aa"), response.NodeID) + require.Equal(t, p2ptypes.NodeID("aa"), response.NodeID) } func TestReactor_ChunkRequest(t *testing.T) { @@ -220,7 +223,7 @@ func TestReactor_ChunkRequest(t *testing.T) { rts := setup(t, conn, nil, nil, 2) rts.chunkInCh <- p2p.Envelope{ - From: types.NodeID("aa"), + From: p2ptypes.NodeID("aa"), Message: tc.request, } @@ -237,7 +240,7 @@ func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { rts := setup(t, nil, nil, nil, 2) rts.snapshotInCh <- p2p.Envelope{ - From: types.NodeID("aa"), + From: p2ptypes.NodeID("aa"), Message: &ssproto.ChunkRequest{}, } @@ -245,7 +248,7 @@ func TestReactor_SnapshotsRequest_InvalidRequest(t *testing.T) { require.Error(t, response.Err) require.Empty(t, rts.snapshotOutCh) require.Contains(t, response.Err.Error(), "received unknown message") - require.Equal(t, types.NodeID("aa"), response.NodeID) + require.Equal(t, p2ptypes.NodeID("aa"), response.NodeID) } func TestReactor_SnapshotsRequest(t *testing.T) { @@ -297,7 +300,7 @@ func TestReactor_SnapshotsRequest(t *testing.T) { rts := setup(t, conn, nil, nil, 100) rts.snapshotInCh <- p2p.Envelope{ - From: types.NodeID("aa"), + From: p2ptypes.NodeID("aa"), Message: &ssproto.SnapshotsRequest{}, } @@ -329,18 +332,18 @@ func TestReactor_LightBlockResponse(t *testing.T) { blockID, factory.DefaultTestTime) require.NoError(t, err) - sh := &types.SignedHeader{ + sh := &metadata.SignedHeader{ Header: h, - Commit: &types.Commit{ + Commit: &metadata.Commit{ Height: h.Height, BlockID: blockID, - Signatures: []types.CommitSig{ + Signatures: []metadata.CommitSig{ vote.CommitSig(), }, }, 
} - lb := &types.LightBlock{ + lb := &light.LightBlock{ SignedHeader: sh, ValidatorSet: vals, } @@ -350,7 +353,7 @@ func TestReactor_LightBlockResponse(t *testing.T) { rts.stateStore.On("LoadValidators", height).Return(vals, nil) rts.blockInCh <- p2p.Envelope{ - From: types.NodeID("aa"), + From: p2ptypes.NodeID("aa"), Message: &ssproto.LightBlockRequest{ Height: 10, }, @@ -359,10 +362,10 @@ func TestReactor_LightBlockResponse(t *testing.T) { select { case response := <-rts.blockOutCh: - require.Equal(t, types.NodeID("aa"), response.To) + require.Equal(t, p2ptypes.NodeID("aa"), response.To) res, ok := response.Message.(*ssproto.LightBlockResponse) require.True(t, ok) - receivedLB, err := types.LightBlockFromProto(res.LightBlock) + receivedLB, err := light.LightBlockFromProto(res.LightBlock) require.NoError(t, err) require.Equal(t, lb, receivedLB) case <-time.After(1 * time.Second): @@ -373,11 +376,11 @@ func TestReactor_LightBlockResponse(t *testing.T) { func TestReactor_Dispatcher(t *testing.T) { rts := setup(t, nil, nil, nil, 2) rts.peerUpdateCh <- p2p.PeerUpdate{ - NodeID: types.NodeID("aa"), + NodeID: p2ptypes.NodeID("aa"), Status: p2p.PeerStatusUp, } rts.peerUpdateCh <- p2p.PeerUpdate{ - NodeID: types.NodeID("bb"), + NodeID: p2ptypes.NodeID("bb"), Status: p2p.PeerStatusUp, } @@ -436,14 +439,14 @@ func TestReactor_Backfill(t *testing.T) { peers := []string{"a", "b", "c", "d"} for _, peer := range peers { rts.peerUpdateCh <- p2p.PeerUpdate{ - NodeID: types.NodeID(peer), + NodeID: p2ptypes.NodeID(peer), Status: p2p.PeerStatusUp, } } trackingHeight := startHeight rts.stateStore.On("SaveValidatorSets", mock.AnythingOfType("int64"), mock.AnythingOfType("int64"), - mock.AnythingOfType("*types.ValidatorSet")).Return(func(lh, uh int64, vals *types.ValidatorSet) error { + mock.AnythingOfType("*types.ValidatorSet")).Return(func(lh, uh int64, vals *consensus.ValidatorSet) error { require.Equal(t, trackingHeight, lh) require.Equal(t, lh, uh) require.GreaterOrEqual(t, lh, stopHeight) @@ -500,7 +503,7 @@ func retryUntil(t *testing.T, fn func() bool, timeout time.Duration) { } func handleLightBlockRequests(t *testing.T, - chain map[int64]*types.LightBlock, + chain map[int64]*light.LightBlock, receiving chan p2p.Envelope, sending chan p2p.Envelope, close chan struct{}, @@ -550,8 +553,8 @@ func handleLightBlockRequests(t *testing.T, } } -func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*types.LightBlock { - chain := make(map[int64]*types.LightBlock, toHeight-fromHeight) +func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime time.Time) map[int64]*light.LightBlock { + chain := make(map[int64]*light.LightBlock, toHeight-fromHeight) lastBlockID := factory.MakeBlockID() blockTime := startTime.Add(-5 * time.Minute) for height := fromHeight; height < toHeight; height++ { @@ -563,8 +566,8 @@ func buildLightBlockChain(t *testing.T, fromHeight, toHeight int64, startTime ti } func mockLB(t *testing.T, height int64, time time.Time, - lastBlockID types.BlockID) *types.LightBlock { - header, err := factory.MakeHeader(&types.Header{ + lastBlockID metadata.BlockID) *light.LightBlock { + header, err := factory.MakeHeader(&metadata.Header{ Height: height, LastBlockID: lastBlockID, Time: time, @@ -573,11 +576,11 @@ func mockLB(t *testing.T, height int64, time time.Time, vals, pv := factory.RandValidatorSet(3, 10) header.ValidatorsHash = vals.Hash() lastBlockID = factory.MakeBlockIDWithHash(header.Hash()) - voteSet := 
types.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, vals) + voteSet := consensus.NewVoteSet(factory.DefaultTestChainID, height, 0, tmproto.PrecommitType, vals) commit, err := factory.MakeCommit(lastBlockID, height, 0, voteSet, pv, time) require.NoError(t, err) - return &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + return &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: header, Commit: commit, }, diff --git a/internal/statesync/snapshots.go b/internal/statesync/snapshots.go index 9058304a9..0d097b9f5 100644 --- a/internal/statesync/snapshots.go +++ b/internal/statesync/snapshots.go @@ -10,7 +10,7 @@ import ( "time" tmsync "github.com/tendermint/tendermint/internal/libs/sync" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) // snapshotKey is a snapshot key used for lookups. @@ -47,16 +47,16 @@ type snapshotPool struct { tmsync.Mutex snapshots map[snapshotKey]*snapshot - snapshotPeers map[snapshotKey]map[types.NodeID]types.NodeID + snapshotPeers map[snapshotKey]map[p2p.NodeID]p2p.NodeID // indexes for fast searches formatIndex map[uint32]map[snapshotKey]bool heightIndex map[uint64]map[snapshotKey]bool - peerIndex map[types.NodeID]map[snapshotKey]bool + peerIndex map[p2p.NodeID]map[snapshotKey]bool // blacklists for rejected items formatBlacklist map[uint32]bool - peerBlacklist map[types.NodeID]bool + peerBlacklist map[p2p.NodeID]bool snapshotBlacklist map[snapshotKey]bool } @@ -65,12 +65,12 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool { return &snapshotPool{ stateProvider: stateProvider, snapshots: make(map[snapshotKey]*snapshot), - snapshotPeers: make(map[snapshotKey]map[types.NodeID]types.NodeID), + snapshotPeers: make(map[snapshotKey]map[p2p.NodeID]p2p.NodeID), formatIndex: make(map[uint32]map[snapshotKey]bool), heightIndex: make(map[uint64]map[snapshotKey]bool), - peerIndex: make(map[types.NodeID]map[snapshotKey]bool), + peerIndex: make(map[p2p.NodeID]map[snapshotKey]bool), formatBlacklist: make(map[uint32]bool), - peerBlacklist: make(map[types.NodeID]bool), + peerBlacklist: make(map[p2p.NodeID]bool), snapshotBlacklist: make(map[snapshotKey]bool), } } @@ -79,7 +79,7 @@ func newSnapshotPool(stateProvider StateProvider) *snapshotPool { // snapshots. It returns true if this was a new, non-blacklisted snapshot. The // snapshot height is verified using the light client, and the expected app hash // is set for the snapshot. -func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error) { +func (p *snapshotPool) Add(peerID p2p.NodeID, snapshot *snapshot) (bool, error) { ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second) defer cancel() @@ -105,7 +105,7 @@ func (p *snapshotPool) Add(peerID types.NodeID, snapshot *snapshot) (bool, error } if p.snapshotPeers[key] == nil { - p.snapshotPeers[key] = make(map[types.NodeID]types.NodeID) + p.snapshotPeers[key] = make(map[p2p.NodeID]p2p.NodeID) } p.snapshotPeers[key][peerID] = peerID @@ -142,7 +142,7 @@ func (p *snapshotPool) Best() *snapshot { } // GetPeer returns a random peer for a snapshot, if any. -func (p *snapshotPool) GetPeer(snapshot *snapshot) types.NodeID { +func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.NodeID { peers := p.GetPeers(snapshot) if len(peers) == 0 { return "" @@ -151,13 +151,13 @@ func (p *snapshotPool) GetPeer(snapshot *snapshot) types.NodeID { } // GetPeers returns the peers for a snapshot. 
-func (p *snapshotPool) GetPeers(snapshot *snapshot) []types.NodeID { +func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.NodeID { key := snapshot.Key() p.Lock() defer p.Unlock() - peers := make([]types.NodeID, 0, len(p.snapshotPeers[key])) + peers := make([]p2p.NodeID, 0, len(p.snapshotPeers[key])) for _, peer := range p.snapshotPeers[key] { peers = append(peers, peer) } @@ -254,7 +254,7 @@ func (p *snapshotPool) RejectFormat(format uint32) { } // RejectPeer rejects a peer. It will never be used again. -func (p *snapshotPool) RejectPeer(peerID types.NodeID) { +func (p *snapshotPool) RejectPeer(peerID p2p.NodeID) { if len(peerID) == 0 { return } @@ -267,14 +267,14 @@ func (p *snapshotPool) RejectPeer(peerID types.NodeID) { } // RemovePeer removes a peer from the pool, and any snapshots that no longer have peers. -func (p *snapshotPool) RemovePeer(peerID types.NodeID) { +func (p *snapshotPool) RemovePeer(peerID p2p.NodeID) { p.Lock() defer p.Unlock() p.removePeer(peerID) } // removePeer removes a peer. The caller must hold the mutex lock. -func (p *snapshotPool) removePeer(peerID types.NodeID) { +func (p *snapshotPool) removePeer(peerID p2p.NodeID) { for key := range p.peerIndex[peerID] { delete(p.snapshotPeers[key], peerID) if len(p.snapshotPeers[key]) == 0 { diff --git a/internal/statesync/snapshots_test.go b/internal/statesync/snapshots_test.go index 6f27269f7..263db3f68 100644 --- a/internal/statesync/snapshots_test.go +++ b/internal/statesync/snapshots_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/internal/statesync/mocks" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func TestSnapshot_Key(t *testing.T) { @@ -42,7 +42,7 @@ func TestSnapshotPool_Add(t *testing.T) { stateProvider := &mocks.StateProvider{} stateProvider.On("AppHash", mock.Anything, uint64(1)).Return([]byte("app_hash"), nil) - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") // Adding to the pool should work pool := newSnapshotPool(stateProvider) @@ -56,7 +56,7 @@ func TestSnapshotPool_Add(t *testing.T) { require.True(t, added) // Adding again from a different peer should return false - otherNodeID := types.NodeID("bb") + otherNodeID := p2p.NodeID("bb") added, err = pool.Add(otherNodeID, &snapshot{ Height: 1, Format: 1, @@ -81,8 +81,8 @@ func TestSnapshotPool_GetPeer(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") _, err := pool.Add(peerAID, s) require.NoError(t, err) @@ -118,8 +118,8 @@ func TestSnapshotPool_GetPeers(t *testing.T) { s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") _, err := pool.Add(peerAID, s) require.NoError(t, err) @@ -146,13 +146,13 @@ func TestSnapshotPool_Ranked_Best(t *testing.T) { // tie-breaker. 
expectSnapshots := []struct { snapshot *snapshot - peers []types.NodeID + peers []p2p.NodeID }{ - {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, - {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC", "DD"}}, - {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, - {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, - {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []types.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []p2p.NodeID{"AA", "BB", "CC", "DD"}}, + {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC", "DD"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}}, + {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []p2p.NodeID{"AA", "BB", "CC"}}, } // Add snapshots in reverse order, to make sure the pool enforces some order. @@ -186,7 +186,7 @@ func TestSnapshotPool_Reject(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") snapshots := []*snapshot{ {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, @@ -216,7 +216,7 @@ func TestSnapshotPool_RejectFormat(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") snapshots := []*snapshot{ {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, @@ -247,8 +247,8 @@ func TestSnapshotPool_RejectPeer(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} @@ -289,8 +289,8 @@ func TestSnapshotPool_RemovePeer(t *testing.T) { stateProvider.On("AppHash", mock.Anything, mock.Anything).Return([]byte("app_hash"), nil) pool := newSnapshotPool(stateProvider) - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} diff --git a/internal/statesync/stateprovider.go b/internal/statesync/stateprovider.go index fd889dc51..64ede194a 100644 --- a/internal/statesync/stateprovider.go +++ b/internal/statesync/stateprovider.go @@ -15,9 +15,9 @@ import ( lighthttp "github.com/tendermint/tendermint/light/provider/http" lightrpc "github.com/tendermint/tendermint/light/rpc" lightdb "github.com/tendermint/tendermint/light/store/db" + "github.com/tendermint/tendermint/pkg/metadata" rpchttp "github.com/tendermint/tendermint/rpc/client/http" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) //go:generate ../../scripts/mockery_generate.sh StateProvider @@ -28,7 +28,7 @@ type StateProvider interface { // AppHash returns the app hash after the 
given height has been committed. AppHash(ctx context.Context, height uint64) ([]byte, error) // Commit returns the commit at the given height. - Commit(ctx context.Context, height uint64) (*types.Commit, error) + Commit(ctx context.Context, height uint64) (*metadata.Commit, error) // State returns a state object at the given height. State(ctx context.Context, height uint64) (sm.State, error) } @@ -148,7 +148,7 @@ func (s *lightClientStateProvider) AppHash(ctx context.Context, height uint64) ( } // Commit implements StateProvider. -func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*types.Commit, error) { +func (s *lightClientStateProvider) Commit(ctx context.Context, height uint64) (*metadata.Commit, error) { s.Lock() defer s.Unlock() header, err := s.lc.VerifyLightBlockAtHeight(ctx, int64(height), time.Now()) diff --git a/internal/statesync/syncer.go b/internal/statesync/syncer.go index 5dc8aeb8c..e0d3caa9d 100644 --- a/internal/statesync/syncer.go +++ b/internal/statesync/syncer.go @@ -7,15 +7,16 @@ import ( "fmt" "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) const ( @@ -117,7 +118,7 @@ func (s *syncer) AddChunk(chunk *chunk) (bool, error) { // AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen // snapshot was accepted and added. -func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, error) { +func (s *syncer) AddSnapshot(peerID p2ptypes.NodeID, snapshot *snapshot) (bool, error) { added, err := s.snapshots.Add(peerID, snapshot) if err != nil { return false, err @@ -131,7 +132,7 @@ func (s *syncer) AddSnapshot(peerID types.NodeID, snapshot *snapshot) (bool, err // AddPeer adds a peer to the pool. For now we just keep it simple and send a // single request to discover snapshots, later we may want to do retries and stuff. -func (s *syncer) AddPeer(peerID types.NodeID) { +func (s *syncer) AddPeer(peerID p2ptypes.NodeID) { s.logger.Debug("Requesting snapshots from peer", "peer", peerID) s.snapshotCh <- p2p.Envelope{ To: peerID, @@ -140,7 +141,7 @@ func (s *syncer) AddPeer(peerID types.NodeID) { } // RemovePeer removes a peer from the pool. -func (s *syncer) RemovePeer(peerID types.NodeID) { +func (s *syncer) RemovePeer(peerID p2ptypes.NodeID) { s.logger.Debug("Removing peer from sync", "peer", peerID) s.snapshots.RemovePeer(peerID) } @@ -152,7 +153,7 @@ func (s *syncer) SyncAny( ctx context.Context, discoveryTime time.Duration, requestSnapshots func(), -) (sm.State, *types.Commit, error) { +) (sm.State, *metadata.Commit, error) { if discoveryTime != 0 && discoveryTime < minimumDiscoveryTime { discoveryTime = minimumDiscoveryTime @@ -250,7 +251,7 @@ func (s *syncer) SyncAny( // Sync executes a sync for a specific snapshot, returning the latest state and block commit which // the caller must use to bootstrap the node. 
-func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.Commit, error) { +func (s *syncer) Sync(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) (sm.State, *metadata.Commit, error) { s.mtx.Lock() if s.chunks != nil { s.mtx.Unlock() @@ -389,7 +390,7 @@ func (s *syncer) applyChunks(ctx context.Context, chunks *chunkQueue) error { // Reject any senders as requested by the app for _, sender := range resp.RejectSenders { if sender != "" { - peerID := types.NodeID(sender) + peerID := p2ptypes.NodeID(sender) s.snapshots.RejectPeer(peerID) if err := chunks.DiscardSender(peerID); err != nil { diff --git a/internal/statesync/syncer_test.go b/internal/statesync/syncer_test.go index c1d6b462a..d569d2862 100644 --- a/internal/statesync/syncer_test.go +++ b/internal/statesync/syncer_test.go @@ -11,14 +11,16 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/internal/statesync/mocks" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" + "github.com/tendermint/tendermint/pkg/p2p" ssproto "github.com/tendermint/tendermint/proto/tendermint/statesync" "github.com/tendermint/tendermint/proxy" proxymocks "github.com/tendermint/tendermint/proxy/mocks" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -36,19 +38,19 @@ func TestSyncer_SyncAny(t *testing.T) { }, LastBlockHeight: 1, - LastBlockID: types.BlockID{Hash: []byte("blockhash")}, + LastBlockID: metadata.BlockID{Hash: []byte("blockhash")}, LastBlockTime: time.Now(), LastResultsHash: []byte("last_results_hash"), AppHash: []byte("app_hash"), - LastValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val1")}}, - Validators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val2")}}, - NextValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val3")}}, + LastValidators: &consensus.ValidatorSet{Proposer: &consensus.Validator{Address: []byte("val1")}}, + Validators: &consensus.ValidatorSet{Proposer: &consensus.Validator{Address: []byte("val2")}}, + NextValidators: &consensus.ValidatorSet{Proposer: &consensus.Validator{Address: []byte("val3")}}, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), LastHeightConsensusParamsChanged: 1, } - commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}} + commit := &metadata.Commit{BlockID: metadata.BlockID{Hash: []byte("blockhash")}} chunks := []*chunk{ {Height: 1, Format: 1, Index: 0, Chunk: []byte{1, 1, 0}}, @@ -65,9 +67,9 @@ func TestSyncer_SyncAny(t *testing.T) { connSnapshot := &proxymocks.AppConnSnapshot{} connQuery := &proxymocks.AppConnQuery{} - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") - peerCID := types.NodeID("cc") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") + peerCID := p2p.NodeID("cc") rts := setup(t, connSnapshot, connQuery, stateProvider, 3) // Adding a chunk should error when no sync is in progress @@ -216,7 +218,7 @@ func TestSyncer_SyncAny_abort(t *testing.T) { rts := setup(t, nil, nil, stateProvider, 2) s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - peerID := types.NodeID("aa") + peerID := 
p2p.NodeID("aa") _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) @@ -241,7 +243,7 @@ func TestSyncer_SyncAny_reject(t *testing.T) { s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") _, err := rts.syncer.AddSnapshot(peerID, s22) require.NoError(t, err) @@ -280,7 +282,7 @@ func TestSyncer_SyncAny_reject_format(t *testing.T) { s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") _, err := rts.syncer.AddSnapshot(peerID, s22) require.NoError(t, err) @@ -310,9 +312,9 @@ func TestSyncer_SyncAny_reject_sender(t *testing.T) { rts := setup(t, nil, nil, stateProvider, 2) - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") - peerCID := types.NodeID("cc") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") + peerCID := p2p.NodeID("cc") // sbc will be offered first, which will be rejected with reject_sender, causing all snapshots // submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa will reject and @@ -359,7 +361,7 @@ func TestSyncer_SyncAny_abciError(t *testing.T) { errBoom := errors.New("boom") s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} - peerID := types.NodeID("aa") + peerID := p2p.NodeID("aa") _, err := rts.syncer.AddSnapshot(peerID, s) require.NoError(t, err) @@ -560,9 +562,9 @@ func TestSyncer_applyChunks_RejectSenders(t *testing.T) { // Set up three peers across two snapshots, and ask for one of them to be banned. // It should be banned from all snapshots. - peerAID := types.NodeID("aa") - peerBID := types.NodeID("bb") - peerCID := types.NodeID("cc") + peerAID := p2p.NodeID("aa") + peerBID := p2p.NodeID("bb") + peerCID := p2p.NodeID("cc") s1 := &snapshot{Height: 1, Format: 1, Chunks: 3} s2 := &snapshot{Height: 2, Format: 1, Chunks: 3} diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go index fd4a94382..23c27b490 100644 --- a/libs/pubsub/example_test.go +++ b/libs/pubsub/example_test.go @@ -6,10 +6,10 @@ import ( "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" ) func TestExample(t *testing.T) { diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go index 54a030fe8..a0acb42e6 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -39,10 +39,10 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/pubsub/query" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" ) type operation int @@ -71,7 +71,7 @@ var ( // allows event types to repeat themselves with the same set of keys and // different values. 
type Query interface { - Matches(events []types.Event) (bool, error) + Matches(events []abci.Event) (bool, error) String() string } @@ -103,7 +103,7 @@ type cmd struct { // publish msg interface{} - events []types.Event + events []abci.Event } // Server allows clients to subscribe/unsubscribe for messages, publishing @@ -315,13 +315,13 @@ func (s *Server) NumClientSubscriptions(clientID string) int { // Publish publishes the given message. An error will be returned to the caller // if the context is canceled. func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithEvents(ctx, msg, []types.Event{}) + return s.PublishWithEvents(ctx, msg, []abci.Event{}) } // PublishWithEvents publishes the given message with the set of events. The set // is matched with clients queries. If there is a match, the message is sent to // the client. -func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []types.Event) error { +func (s *Server) PublishWithEvents(ctx context.Context, msg interface{}, events []abci.Event) error { select { case s.cmds <- cmd{op: pub, msg: msg, events: events}: return nil @@ -474,7 +474,7 @@ func (state *state) removeAll(reason error) { } } -func (state *state) send(msg interface{}, events []types.Event) error { +func (state *state) send(msg interface{}, events []abci.Event) error { for qStr, clientSubscriptions := range state.subscriptions { if sub, ok := clientSubscriptions[qStr]; ok && sub.id == qStr { continue diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go index 525415493..0118ff4d2 100644 --- a/libs/pubsub/pubsub_test.go +++ b/libs/pubsub/pubsub_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go index dd6b3f3b2..9d7fa0a8d 100644 --- a/libs/pubsub/query/empty.go +++ b/libs/pubsub/query/empty.go @@ -1,15 +1,13 @@ package query -import ( - "github.com/tendermint/tendermint/abci/types" -) +import "github.com/tendermint/tendermint/pkg/abci" // Empty query matches any set of events. type Empty struct { } // Matches always returns true. -func (Empty) Matches(events []types.Event) (bool, error) { +func (Empty) Matches(events []abci.Event) (bool, error) { return true, nil } diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go index 4bb3067d6..8a74ef725 100644 --- a/libs/pubsub/query/empty_test.go +++ b/libs/pubsub/query/empty_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" ) func TestEmptyQueryMatchesAnything(t *testing.T) { diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go index 7b1dfe0f9..9b1af5f8a 100644 --- a/libs/pubsub/query/query.go +++ b/libs/pubsub/query/query.go @@ -16,7 +16,7 @@ import ( "strings" "time" - "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" ) var ( @@ -200,7 +200,7 @@ func (q *Query) Conditions() ([]Condition, error) { // // For example, query "name=John" matches events = {"name": ["John", "Eric"]}. 
// More examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(rawEvents []types.Event) (bool, error) { +func (q *Query) Matches(rawEvents []abci.Event) (bool, error) { if len(rawEvents) == 0 { return false, nil } @@ -505,7 +505,7 @@ func matchValue(value string, op Operator, operand reflect.Value) (bool, error) return false, nil } -func flattenEvents(events []types.Event) map[string][]string { +func flattenEvents(events []abci.Event) map[string][]string { flattened := make(map[string][]string) for _, event := range events { diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go index 87f61aafe..a96a2739f 100644 --- a/libs/pubsub/query/query_test.go +++ b/libs/pubsub/query/query_test.go @@ -7,8 +7,8 @@ import ( "time" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" ) func expandEvents(flattenedEvents map[string][]string) []abci.Event { diff --git a/libs/pubsub/subscription.go b/libs/pubsub/subscription.go index 40b84711e..abf50f8a2 100644 --- a/libs/pubsub/subscription.go +++ b/libs/pubsub/subscription.go @@ -5,8 +5,8 @@ import ( "fmt" "github.com/google/uuid" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/pkg/abci" ) var ( @@ -90,10 +90,10 @@ func (s *Subscription) cancel(err error) { type Message struct { subID string data interface{} - events []types.Event + events []abci.Event } -func NewMessage(subID string, data interface{}, events []types.Event) Message { +func NewMessage(subID string, data interface{}, events []abci.Event) Message { return Message{ subID: subID, data: data, @@ -109,4 +109,4 @@ func (msg Message) SubscriptionID() string { return msg.subID } func (msg Message) Data() interface{} { return msg.data } // Events returns events, which matched the client's query. -func (msg Message) Events() []types.Event { return msg.events } +func (msg Message) Events() []abci.Event { return msg.events } diff --git a/light/client.go b/light/client.go index 52bbdf981..a4b42a9d3 100644 --- a/light/client.go +++ b/light/client.go @@ -14,7 +14,8 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/light/provider" "github.com/tendermint/tendermint/light/store" - "github.com/tendermint/tendermint/types" + types "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" ) type mode byte @@ -451,7 +452,7 @@ func (c *Client) VerifyLightBlockAtHeight(ctx context.Context, height int64, now // If, at any moment, a LightBlock is not found by the primary provider as part of // verification then the provider will be replaced by another and the process will // restart. -func (c *Client) VerifyHeader(ctx context.Context, newHeader *types.Header, now time.Time) error { +func (c *Client) VerifyHeader(ctx context.Context, newHeader *metadata.Header, now time.Time) error { if newHeader == nil { return errors.New("nil header") } @@ -873,12 +874,12 @@ func (c *Client) updateTrustedLightBlock(l *types.LightBlock) error { // replaced with another provider and the operation is repeated. 
func (c *Client) backwards( ctx context.Context, - trustedHeader *types.Header, - newHeader *types.Header) error { + trustedHeader *metadata.Header, + newHeader *metadata.Header) error { var ( verifiedHeader = trustedHeader - interimHeader *types.Header + interimHeader *metadata.Header ) for verifiedHeader.Height > newHeader.Height { @@ -1069,7 +1070,7 @@ func (c *Client) findNewPrimary(ctx context.Context, height int64, remove bool) // compareFirstHeaderWithWitnesses concurrently compares h with all witnesses. If any // witness reports a different header than h, the function returns an error. -func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *types.SignedHeader) error { +func (c *Client) compareFirstHeaderWithWitnesses(ctx context.Context, h *metadata.SignedHeader) error { compareCtx, cancel := context.WithCancel(ctx) defer cancel() diff --git a/light/client_benchmark_test.go b/light/client_benchmark_test.go index 04ea6d1fc..c6699234d 100644 --- a/light/client_benchmark_test.go +++ b/light/client_benchmark_test.go @@ -11,7 +11,10 @@ import ( "github.com/tendermint/tendermint/light" "github.com/tendermint/tendermint/light/provider" dbs "github.com/tendermint/tendermint/light/store/db" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + types "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" ) // NOTE: block is produced every minute. Make sure the verification time @@ -28,8 +31,8 @@ type providerBenchmarkImpl struct { blocks map[int64]*types.LightBlock } -func newProviderBenchmarkImpl(headers map[int64]*types.SignedHeader, - vals map[int64]*types.ValidatorSet) provider.Provider { +func newProviderBenchmarkImpl(headers map[int64]*metadata.SignedHeader, + vals map[int64]*consensus.ValidatorSet) provider.Provider { impl := providerBenchmarkImpl{ blocks: make(map[int64]*types.LightBlock, len(headers)), } @@ -56,7 +59,7 @@ func (impl *providerBenchmarkImpl) LightBlock(ctx context.Context, height int64) return lb, nil } -func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ types.Evidence) error { +func (impl *providerBenchmarkImpl) ReportEvidence(_ context.Context, _ evidence.Evidence) error { panic("not implemented") } diff --git a/light/client_test.go b/light/client_test.go index e8a478a53..b191d99ba 100644 --- a/light/client_test.go +++ b/light/client_test.go @@ -20,7 +20,9 @@ import ( "github.com/tendermint/tendermint/light/provider" provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" dbs "github.com/tendermint/tendermint/light/store/db" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" ) const ( @@ -36,23 +38,23 @@ var ( hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)) // 3/3 signed h2 = keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h1.Hash()}) + hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), metadata.BlockID{Hash: h1.Hash()}) // 3/3 signed h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, - hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) + hash("app_hash"), 
hash("cons_hash"), hash("results_hash"), 0, len(keys), metadata.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour trustOptions = light.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: h1.Hash(), } - valSet = map[int64]*types.ValidatorSet{ + valSet = map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, 3: vals, 4: vals, } - headerSet = map[int64]*types.SignedHeader{ + headerSet = map[int64]*metadata.SignedHeader{ 1: h1, // interim header (3/3 signed) 2: h2, @@ -117,8 +119,8 @@ func TestClient_SequentialVerification(t *testing.T) { testCases := []struct { name string - otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + otherHeaders map[int64]*metadata.SignedHeader // all except ^ + vals map[int64]*consensus.ValidatorSet initErr bool verifyErr bool }{ @@ -131,12 +133,12 @@ func TestClient_SequentialVerification(t *testing.T) { }, { "bad: different first header", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // different header 1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, }, true, @@ -144,8 +146,8 @@ func TestClient_SequentialVerification(t *testing.T) { }, { "bad: no first signed header", - map[int64]*types.SignedHeader{}, - map[int64]*types.ValidatorSet{ + map[int64]*metadata.SignedHeader{}, + map[int64]*consensus.ValidatorSet{ 1: differentVals, }, true, @@ -153,10 +155,10 @@ func TestClient_SequentialVerification(t *testing.T) { }, { "bad: different first validator set", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ 1: h1, }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: differentVals, }, true, @@ -164,7 +166,7 @@ func TestClient_SequentialVerification(t *testing.T) { }, { "bad: 1/3 signed interim header", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, // interim header (1/3 signed) @@ -180,7 +182,7 @@ func TestClient_SequentialVerification(t *testing.T) { }, { "bad: 1/3 signed last header", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, // interim header (3/3 signed) @@ -197,7 +199,7 @@ func TestClient_SequentialVerification(t *testing.T) { { "bad: different validator set at height 3", headerSet, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, 3: newVals, @@ -252,14 +254,14 @@ func TestClient_SkippingVerification(t *testing.T) { testCases := []struct { name string - otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + otherHeaders map[int64]*metadata.SignedHeader // all except ^ + vals map[int64]*consensus.ValidatorSet initErr bool verifyErr bool }{ { "good", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, // last header (3/3 signed) @@ -271,13 +273,13 @@ func TestClient_SkippingVerification(t *testing.T) { }, { "good, but val set changes by 2/3 (1/3 of vals is still present)", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, 3: transitKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(transitKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, 3: transitVals, @@ 
-287,7 +289,7 @@ func TestClient_SkippingVerification(t *testing.T) { }, { "good, but val set changes 100% at height 2", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, // interim header (3/3 signed) @@ -297,7 +299,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, 3: newVals, @@ -307,7 +309,7 @@ func TestClient_SkippingVerification(t *testing.T) { }, { "bad: last header signed by newVals, interim header has no signers", - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ // trusted header 1: h1, // last header (0/4 of the original val set signed) @@ -317,7 +319,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, 3: newVals, @@ -599,7 +601,7 @@ func TestClient_Concurrency(t *testing.T) { } func TestClient_AddProviders(t *testing.T) { - mockFullNode := mockNodeFromHeadersAndVals(map[int64]*types.SignedHeader{ + mockFullNode := mockNodeFromHeadersAndVals(map[int64]*metadata.SignedHeader{ 1: h1, 2: h2, }, valSet) @@ -725,12 +727,12 @@ func TestClient_BackwardsVerification(t *testing.T) { } { testCases := []struct { - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + headers map[int64]*metadata.SignedHeader + vals map[int64]*consensus.ValidatorSet }{ { // 7) provides incorrect height - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 2: keys.GenSignedHeader(chainID, 1, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(keys)), 3: h3, @@ -739,7 +741,7 @@ func TestClient_BackwardsVerification(t *testing.T) { }, { // 8) provides incorrect hash - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 2: keys.GenSignedHeader(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash2"), hash("cons_hash23"), hash("results_hash30"), 0, len(keys)), 3: h3, @@ -797,13 +799,13 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { // different headers hash then primary plus less than 1/3 signed (no fork) - headers1 := map[int64]*types.SignedHeader{ + headers1 := map[int64]*metadata.SignedHeader{ 1: h1, 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), - len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), + len(keys), len(keys), metadata.BlockID{Hash: h1.Hash()}), } - vals1 := map[int64]*types.ValidatorSet{ + vals1 := map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, } @@ -811,11 +813,11 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { mockBadNode1.On("LightBlock", mock.Anything, mock.Anything).Return(nil, provider.ErrLightBlockNotFound) // header is empty - headers2 := map[int64]*types.SignedHeader{ + headers2 := map[int64]*metadata.SignedHeader{ 1: h1, 2: h2, } - vals2 := map[int64]*types.ValidatorSet{ + vals2 := map[int64]*consensus.ValidatorSet{ 1: vals, 2: 
vals, } @@ -861,24 +863,24 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { func TestClient_TrustedValidatorSet(t *testing.T) { differentVals, _ := factory.RandValidatorSet(10, 100) mockBadValSetNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ 1: h1, // 3/3 signed, but validator set at height 2 below is invalid -> witness // should be removed. 2: keys.GenSignedHeaderLastBlockID(chainID, 2, bTime.Add(30*time.Minute), nil, vals, vals, hash("app_hash2"), hash("cons_hash"), hash("results_hash"), - 0, len(keys), types.BlockID{Hash: h1.Hash()}), + 0, len(keys), metadata.BlockID{Hash: h1.Hash()}), }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: differentVals, }) mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ 1: h1, 2: h2, }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 2: vals, }) @@ -904,12 +906,12 @@ func TestClient_TrustedValidatorSet(t *testing.T) { func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { mockFullNode := mockNodeFromHeadersAndVals( - map[int64]*types.SignedHeader{ + map[int64]*metadata.SignedHeader{ 1: h1, 3: h3, 0: h3, }, - map[int64]*types.ValidatorSet{ + map[int64]*consensus.ValidatorSet{ 1: vals, 3: vals, 0: vals, @@ -939,14 +941,14 @@ func TestClientPrunesHeadersAndValidatorSets(t *testing.T) { } func TestClientEnsureValidHeadersAndValSets(t *testing.T) { - emptyValSet := &types.ValidatorSet{ + emptyValSet := &consensus.ValidatorSet{ Validators: nil, Proposer: nil, } testCases := []struct { - headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + headers map[int64]*metadata.SignedHeader + vals map[int64]*consensus.ValidatorSet errorToThrow error errorHeight int64 @@ -954,21 +956,21 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { err bool }{ { - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 1: h1, 3: h3, }, - vals: map[int64]*types.ValidatorSet{ + vals: map[int64]*consensus.ValidatorSet{ 1: vals, 3: vals, }, err: false, }, { - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 1: h1, }, - vals: map[int64]*types.ValidatorSet{ + vals: map[int64]*consensus.ValidatorSet{ 1: vals, }, errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, @@ -976,7 +978,7 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { err: true, }, { - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 1: h1, }, errorToThrow: provider.ErrBadLightBlock{Reason: errors.New("nil header or vals")}, @@ -985,11 +987,11 @@ func TestClientEnsureValidHeadersAndValSets(t *testing.T) { err: true, }, { - headers: map[int64]*types.SignedHeader{ + headers: map[int64]*metadata.SignedHeader{ 1: h1, 3: h3, }, - vals: map[int64]*types.ValidatorSet{ + vals: map[int64]*consensus.ValidatorSet{ 1: vals, 3: emptyValSet, }, diff --git a/light/detector.go b/light/detector.go index 32a0c3f1e..9a9472461 100644 --- a/light/detector.go +++ b/light/detector.go @@ -8,7 +8,9 @@ import ( "time" "github.com/tendermint/tendermint/light/provider" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/evidence" + lighttypes "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" ) // The detector component of the light client detects and handles attacks on the light client. 
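The import hunk above swaps the monolithic types import for the split pkg/evidence, pkg/light (aliased lighttypes), and pkg/metadata packages; the hunks that follow rename each types.* reference in light/detector.go accordingly. Below is a minimal sketch, not part of the patch, of what calling code looks like under the new layout. Only the import paths and type names come from this patch; the package and struct here are hypothetical, for illustration only.

// Illustrative sketch only (not part of the patch): how light-client code
// imports the split packages introduced here. The package paths and type
// names match the patch; the package and struct below are hypothetical.
package lightsketch

import (
	"github.com/tendermint/tendermint/pkg/evidence"
	lighttypes "github.com/tendermint/tendermint/pkg/light"
	"github.com/tendermint/tendermint/pkg/metadata"
)

// traceEvidence pairs a verified trace with the evidence built from it.
// It exists only to show which package each type now lives in.
type traceEvidence struct {
	Trace    []*lighttypes.LightBlock            // was []*types.LightBlock
	Header   *metadata.SignedHeader              // was *types.SignedHeader
	Evidence *evidence.LightClientAttackEvidence // was *types.LightClientAttackEvidence
}

The same substitution pattern (types.LightBlock to lighttypes.LightBlock, types.SignedHeader to metadata.SignedHeader, types.LightClientAttackEvidence to evidence.LightClientAttackEvidence) repeats through the remaining detector.go hunks below.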
@@ -25,7 +27,7 @@ import ( // // If there are no conflictinge headers, the light client deems the verified target header // trusted and saves it to the trusted store. -func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.LightBlock, now time.Time) error { +func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*lighttypes.LightBlock, now time.Time) error { if primaryTrace == nil || len(primaryTrace) < 2 { return errors.New("nil or single block primary trace") } @@ -107,7 +109,7 @@ func (c *Client) detectDivergence(ctx context.Context, primaryTrace []*types.Lig // 2: errBadWitness -> the witness has either not responded, doesn't have the header or has given us an invalid one // Note: In the case of an invalid header we remove the witness // 3: nil -> the hashes of the two headers match -func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *types.SignedHeader, +func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan error, h *metadata.SignedHeader, witness provider.Provider, witnessIndex int) { lightBlock, err := witness.LightBlock(ctx, h.Height) @@ -203,7 +205,7 @@ func (c *Client) compareNewHeaderWithWitness(ctx context.Context, errc chan erro } // sendEvidence sends evidence to a provider on a best effort basis. -func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEvidence, receiver provider.Provider) { +func (c *Client) sendEvidence(ctx context.Context, ev *evidence.LightClientAttackEvidence, receiver provider.Provider) { err := receiver.ReportEvidence(ctx, ev) if err != nil { c.logger.Error("failed to report evidence to provider", "ev", ev, "provider", receiver) @@ -214,8 +216,8 @@ func (c *Client) sendEvidence(ctx context.Context, ev *types.LightClientAttackEv // two headers of the same height but with different hashes func (c *Client) handleConflictingHeaders( ctx context.Context, - primaryTrace []*types.LightBlock, - challendingBlock *types.LightBlock, + primaryTrace []*lighttypes.LightBlock, + challendingBlock *lighttypes.LightBlock, witnessIndex int, now time.Time, ) error { @@ -287,14 +289,14 @@ func (c *Client) handleConflictingHeaders( // 3. The func (c *Client) examineConflictingHeaderAgainstTrace( ctx context.Context, - trace []*types.LightBlock, - targetBlock *types.LightBlock, + trace []*lighttypes.LightBlock, + targetBlock *lighttypes.LightBlock, source provider.Provider, now time.Time, -) ([]*types.LightBlock, *types.LightBlock, error) { +) ([]*lighttypes.LightBlock, *lighttypes.LightBlock, error) { var ( - previouslyVerifiedBlock, sourceBlock *types.LightBlock - sourceTrace []*types.LightBlock + previouslyVerifiedBlock, sourceBlock *lighttypes.LightBlock + sourceTrace []*lighttypes.LightBlock err error ) @@ -378,7 +380,7 @@ func (c *Client) getTargetBlockOrLatest( ctx context.Context, height int64, witness provider.Provider, -) (bool, *types.LightBlock, error) { +) (bool, *lighttypes.LightBlock, error) { lightBlock, err := witness.LightBlock(ctx, 0) if err != nil { return false, nil, err @@ -403,8 +405,8 @@ func (c *Client) getTargetBlockOrLatest( // newLightClientAttackEvidence determines the type of attack and then forms the evidence filling out // all the fields such that it is ready to be sent to a full node. 
-func newLightClientAttackEvidence(conflicted, trusted, common *types.LightBlock) *types.LightClientAttackEvidence { - ev := &types.LightClientAttackEvidence{ConflictingBlock: conflicted} +func newLightClientAttackEvidence(conflicted, trusted, common *lighttypes.LightBlock) *evidence.LightClientAttackEvidence { + ev := &evidence.LightClientAttackEvidence{ConflictingBlock: conflicted} // We use the common height to indicate the form of the attack. // if this is an equivocation or amnesia attack, i.e. the validator sets are the same, then we // return the height of the conflicting block as the common height. If instead it is a lunatic diff --git a/light/detector_test.go b/light/detector_test.go index 0bf96ace6..d737c04a9 100644 --- a/light/detector_test.go +++ b/light/detector_test.go @@ -16,7 +16,11 @@ import ( "github.com/tendermint/tendermint/light/provider" provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" dbs "github.com/tendermint/tendermint/light/store/db" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + lighttypes "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" ) func TestLightClientAttackEvidence_Lunatic(t *testing.T) { @@ -25,8 +29,8 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { latestHeight = int64(3) valSize = 5 divergenceHeight = int64(2) - primaryHeaders = make(map[int64]*types.SignedHeader, latestHeight) - primaryValidators = make(map[int64]*types.ValidatorSet, latestHeight) + primaryHeaders = make(map[int64]*metadata.SignedHeader, latestHeight) + primaryValidators = make(map[int64]*consensus.ValidatorSet, latestHeight) ) witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) @@ -52,29 +56,29 @@ func TestLightClientAttackEvidence_Lunatic(t *testing.T) { mockWitness := mockNodeFromHeadersAndVals(witnessHeaders, witnessValidators) mockPrimary := mockNodeFromHeadersAndVals(primaryHeaders, primaryValidators) - mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { - evAgainstPrimary := &types.LightClientAttackEvidence{ + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(ev evidence.Evidence) bool { + evAgainstPrimary := &evidence.LightClientAttackEvidence{ // after the divergence height the valset doesn't change so we expect the evidence to be for the latest height - ConflictingBlock: &types.LightBlock{ + ConflictingBlock: &lighttypes.LightBlock{ SignedHeader: primaryHeaders[latestHeight], ValidatorSet: primaryValidators[latestHeight], }, CommonHeight: 1, } - return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + return bytes.Equal(ev.Hash(), evAgainstPrimary.Hash()) })).Return(nil) - mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { - evAgainstWitness := &types.LightClientAttackEvidence{ + mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(ev evidence.Evidence) bool { + evAgainstWitness := &evidence.LightClientAttackEvidence{ // when forming evidence against witness we learn that the canonical chain continued to change validator sets // hence the conflicting block is at 7 - ConflictingBlock: &types.LightBlock{ + ConflictingBlock: &lighttypes.LightBlock{ SignedHeader: witnessHeaders[divergenceHeight+1], ValidatorSet: witnessValidators[divergenceHeight+1], }, 
CommonHeight: divergenceHeight - 1, } - return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash()) + return bytes.Equal(ev.Hash(), evAgainstWitness.Hash()) })).Return(nil) c, err := light.NewClient( @@ -134,9 +138,9 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { // primary performs an equivocation attack var ( valSize = 5 - primaryHeaders = make(map[int64]*types.SignedHeader, testCase.latestHeight) + primaryHeaders = make(map[int64]*metadata.SignedHeader, testCase.latestHeight) // validators don't change in this network (however we still use a map just for convenience) - primaryValidators = make(map[int64]*types.ValidatorSet, testCase.latestHeight) + primaryValidators = make(map[int64]*consensus.ValidatorSet, testCase.latestHeight) ) witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, testCase.latestHeight+1, valSize, 2, bTime) @@ -149,7 +153,7 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { // we don't have a network partition so we will make 4/5 (greater than 2/3) malicious and vote again for // a different block (which we do by adding txs) primaryHeaders[height] = chainKeys[height].GenSignedHeader(chainID, height, - bTime.Add(time.Duration(height)*time.Minute), []types.Tx{[]byte("abcd")}, + bTime.Add(time.Duration(height)*time.Minute), []mempool.Tx{[]byte("abcd")}, witnessValidators[height], witnessValidators[height+1], hash("app_hash"), hash("cons_hash"), hash("results_hash"), 0, len(chainKeys[height])-1) primaryValidators[height] = witnessValidators[height] @@ -167,25 +171,25 @@ func TestLightClientAttackEvidence_Equivocation(t *testing.T) { // Check evidence was sent to both full nodes. // Common height should be set to the height of the divergent header in the instance // of an equivocation attack and the validator sets are the same as what the witness has - mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { - evAgainstPrimary := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(ev evidence.Evidence) bool { + evAgainstPrimary := &evidence.LightClientAttackEvidence{ + ConflictingBlock: &lighttypes.LightBlock{ SignedHeader: primaryHeaders[testCase.divergenceHeight], ValidatorSet: primaryValidators[testCase.divergenceHeight], }, CommonHeight: testCase.divergenceHeight, } - return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + return bytes.Equal(ev.Hash(), evAgainstPrimary.Hash()) })).Return(nil) - mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { - evAgainstWitness := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ + mockPrimary.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(ev evidence.Evidence) bool { + evAgainstWitness := &evidence.LightClientAttackEvidence{ + ConflictingBlock: &lighttypes.LightBlock{ SignedHeader: witnessHeaders[testCase.divergenceHeight], ValidatorSet: witnessValidators[testCase.divergenceHeight], }, CommonHeight: testCase.divergenceHeight, } - return bytes.Equal(evidence.Hash(), evAgainstWitness.Hash()) + return bytes.Equal(ev.Hash(), evAgainstWitness.Hash()) })).Return(nil) c, err := light.NewClient( @@ -224,8 +228,8 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { valSize = 5 forgedHeight = int64(12) proofHeight = int64(11) - primaryHeaders = make(map[int64]*types.SignedHeader, forgedHeight) - primaryValidators = 
make(map[int64]*types.ValidatorSet, forgedHeight) + primaryHeaders = make(map[int64]*metadata.SignedHeader, forgedHeight) + primaryValidators = make(map[int64]*consensus.ValidatorSet, forgedHeight) ) witnessHeaders, witnessValidators, chainKeys := genLightBlocksWithKeys(chainID, latestHeight, valSize, 2, bTime) @@ -271,16 +275,16 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { mockWitness.On("LightBlock", mock.Anything, int64(0)).Return(lastBlock, nil).Once() mockWitness.On("LightBlock", mock.Anything, int64(12)).Return(nil, provider.ErrHeightTooHigh) - mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(evidence types.Evidence) bool { + mockWitness.On("ReportEvidence", mock.Anything, mock.MatchedBy(func(ev evidence.Evidence) bool { // Check evidence was sent to the witness against the full node - evAgainstPrimary := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ + evAgainstPrimary := &evidence.LightClientAttackEvidence{ + ConflictingBlock: &lighttypes.LightBlock{ SignedHeader: primaryHeaders[forgedHeight], ValidatorSet: primaryValidators[forgedHeight], }, CommonHeight: latestHeight, } - return bytes.Equal(evidence.Hash(), evAgainstPrimary.Hash()) + return bytes.Equal(ev.Hash(), evAgainstPrimary.Hash()) })).Return(nil).Twice() // In order to perform the attack, the primary needs at least one accomplice as a witness to also @@ -307,7 +311,7 @@ func TestLightClientAttackEvidence_ForwardLunatic(t *testing.T) { // two seconds later, the supporting withness should receive the header that can be used // to prove that there was an attack vals := chainKeys[latestHeight].ToValidators(2, 0) - newLb := &types.LightBlock{ + newLb := &lighttypes.LightBlock{ SignedHeader: chainKeys[latestHeight].GenSignedHeader( chainID, proofHeight, diff --git a/light/errors.go b/light/errors.go index c06ff1a94..39fb535b5 100644 --- a/light/errors.go +++ b/light/errors.go @@ -5,7 +5,8 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/light" ) // ErrOldHeaderExpired means the old (trusted) header has expired according to @@ -23,7 +24,7 @@ func (e ErrOldHeaderExpired) Error() string { // ErrNewValSetCantBeTrusted means the new validator set cannot be trusted // because < 1/3rd (+trustLevel+) of the old validator set has signed. type ErrNewValSetCantBeTrusted struct { - Reason types.ErrNotEnoughVotingPowerSigned + Reason consensus.ErrNotEnoughVotingPowerSigned } func (e ErrNewValSetCantBeTrusted) Error() string { @@ -80,7 +81,7 @@ var ErrNoWitnesses = errors.New("no witnesses connected. please reset light clie // ErrConflictingHeaders is thrown when two conflicting headers are discovered. 
type errConflictingHeaders struct { - Block *types.LightBlock + Block *light.LightBlock WitnessIndex int } diff --git a/light/helpers_test.go b/light/helpers_test.go index 1d25f9166..2e4ac066e 100644 --- a/light/helpers_test.go +++ b/light/helpers_test.go @@ -9,8 +9,11 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" tmtime "github.com/tendermint/tendermint/libs/time" provider_mocks "github.com/tendermint/tendermint/light/provider/mocks" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -65,24 +68,24 @@ func (pkz privKeys) Extend(n int) privKeys { // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). -func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(pkz)) +func (pkz privKeys) ToValidators(init, inc int64) *consensus.ValidatorSet { + res := make([]*consensus.Validator, len(pkz)) for i, k := range pkz { - res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) + res[i] = consensus.NewValidator(k.PubKey(), init+int64(i)*inc) } - return types.NewValidatorSet(res) + return consensus.NewValidatorSet(res) } // signHeader properly signs the header with all keys from first to last exclusive. -func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, first, last int) *types.Commit { - commitSigs := make([]types.CommitSig, len(pkz)) +func (pkz privKeys) signHeader(header *metadata.Header, valSet *consensus.ValidatorSet, first, last int) *metadata.Commit { + commitSigs := make([]metadata.CommitSig, len(pkz)) for i := 0; i < len(pkz); i++ { - commitSigs[i] = types.NewCommitSigAbsent() + commitSigs[i] = metadata.NewCommitSigAbsent() } - blockID := types.BlockID{ + blockID := metadata.BlockID{ Hash: header.Hash(), - PartSetHeader: types.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)}, + PartSetHeader: metadata.PartSetHeader{Total: 1, Hash: crypto.CRandBytes(32)}, } // Fill in the votes we want. 
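The hunk above moves the test helpers' commit plumbing from types to the new packages: CommitSig, BlockID, PartSetHeader, and NewCommit now live in pkg/metadata, while Validator and ValidatorSet come from pkg/consensus. Below is a minimal sketch, not part of the patch, of the same construction under the new packages, using only constructors that appear in this diff; the wrapper function and package name are hypothetical, and the ed25519 key is just a stand-in for a real validator key.

// Illustrative sketch only (not part of the patch): building a commit with
// the renamed packages, via the same constructors the helpers above call.
// Assumes the pkg/consensus and pkg/metadata packages introduced here.
package helpersketch

import (
	"github.com/tendermint/tendermint/crypto/ed25519"
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/metadata"
)

func sketchCommit(height int64, headerHash []byte) *metadata.Commit {
	// One absent signature, as signHeader does before filling in real votes.
	commitSigs := []metadata.CommitSig{metadata.NewCommitSigAbsent()}

	blockID := metadata.BlockID{
		Hash:          headerHash,
		PartSetHeader: metadata.PartSetHeader{Total: 1},
	}

	// Validator set construction moved from types.* to consensus.*.
	val := consensus.NewValidator(ed25519.GenPrivKey().PubKey(), 10)
	_ = consensus.NewValidatorSet([]*consensus.Validator{val})

	return metadata.NewCommit(height, 1, blockID, commitSigs)
}

The GenSignedHeader and GenSignedHeaderLastBlockID hunks that follow apply the identical substitution on the header side, with types.Header becoming metadata.Header and types.Txs becoming mempool.Txs.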
@@ -91,15 +94,15 @@ func (pkz privKeys) signHeader(header *types.Header, valSet *types.ValidatorSet, commitSigs[vote.ValidatorIndex] = vote.CommitSig() } - return types.NewCommit(header.Height, 1, blockID, commitSigs) + return metadata.NewCommit(header.Height, 1, blockID, commitSigs) } -func makeVote(header *types.Header, valset *types.ValidatorSet, - key crypto.PrivKey, blockID types.BlockID) *types.Vote { +func makeVote(header *metadata.Header, valset *consensus.ValidatorSet, + key crypto.PrivKey, blockID metadata.BlockID) *consensus.Vote { addr := key.PubKey().Address() idx, _ := valset.GetByAddress(addr) - vote := &types.Vote{ + vote := &consensus.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: header.Height, @@ -111,7 +114,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, v := vote.ToProto() // Sign it - signBytes := types.VoteSignBytes(header.ChainID, v) + signBytes := consensus.VoteSignBytes(header.ChainID, v) sig, err := key.Sign(signBytes) if err != nil { panic(err) @@ -122,10 +125,10 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, return vote } -func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { +func genHeader(chainID string, height int64, bTime time.Time, txs mempool.Txs, + valset, nextValset *consensus.ValidatorSet, appHash, consHash, resHash []byte) *metadata.Header { - return &types.Header{ + return &metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 0}, ChainID: chainID, Height: height, @@ -143,24 +146,24 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. -func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { +func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs mempool.Txs, + valset, nextValset *consensus.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *metadata.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) - return &types.SignedHeader{ + return &metadata.SignedHeader{ Header: header, Commit: pkz.signHeader(header, valset, first, last), } } // GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. 
-func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, - lastBlockID types.BlockID) *types.SignedHeader { +func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs mempool.Txs, + valset, nextValset *consensus.ValidatorSet, appHash, consHash, resHash []byte, first, last int, + lastBlockID metadata.BlockID) *metadata.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) header.LastBlockID = lastBlockID - return &types.SignedHeader{ + return &metadata.SignedHeader{ Header: header, Commit: pkz.signHeader(header, valset, first, last), } @@ -180,13 +183,13 @@ func genLightBlocksWithKeys( valSize int, valVariation float32, bTime time.Time) ( - map[int64]*types.SignedHeader, - map[int64]*types.ValidatorSet, + map[int64]*metadata.SignedHeader, + map[int64]*consensus.ValidatorSet, map[int64]privKeys) { var ( - headers = make(map[int64]*types.SignedHeader, numBlocks) - valset = make(map[int64]*types.ValidatorSet, numBlocks+1) + headers = make(map[int64]*metadata.SignedHeader, numBlocks) + valset = make(map[int64]*consensus.ValidatorSet, numBlocks+1) keymap = make(map[int64]privKeys, numBlocks+1) keys = genPrivKeys(valSize) totalVariation = valVariation @@ -217,7 +220,7 @@ func genLightBlocksWithKeys( currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), nil, keys.ToValidators(2, 0), newKeys.ToValidators(2, 0), hash("app_hash"), hash("cons_hash"), - hash("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) + hash("results_hash"), 0, len(keys), metadata.BlockID{Hash: lastHeader.Hash()}) headers[height] = currentHeader valset[height] = keys.ToValidators(2, 0) lastHeader = currentHeader @@ -228,11 +231,11 @@ func genLightBlocksWithKeys( return headers, valset, keymap } -func mockNodeFromHeadersAndVals(headers map[int64]*types.SignedHeader, - vals map[int64]*types.ValidatorSet) *provider_mocks.Provider { +func mockNodeFromHeadersAndVals(headers map[int64]*metadata.SignedHeader, + vals map[int64]*consensus.ValidatorSet) *provider_mocks.Provider { mockNode := &provider_mocks.Provider{} for i, header := range headers { - lb := &types.LightBlock{SignedHeader: header, ValidatorSet: vals[i]} + lb := &light.LightBlock{SignedHeader: header, ValidatorSet: vals[i]} mockNode.On("LightBlock", mock.Anything, i).Return(lb, nil) } return mockNode diff --git a/light/light_test.go b/light/light_test.go index f5d2ddd89..0096c870f 100644 --- a/light/light_test.go +++ b/light/light_test.go @@ -16,8 +16,8 @@ import ( "github.com/tendermint/tendermint/light/provider" httpp "github.com/tendermint/tendermint/light/provider/http" dbs "github.com/tendermint/tendermint/light/store/db" + lighttypes "github.com/tendermint/tendermint/pkg/light" rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" ) // NOTE: these are ports of the tests from example_test.go but @@ -143,7 +143,7 @@ func TestClientIntegration_VerifyLightBlockAtHeight(t *testing.T) { require.EqualValues(t, 3, h.Height) } -func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*types.LightBlock, error) { +func waitForBlock(ctx context.Context, p provider.Provider, height int64) (*lighttypes.LightBlock, error) { for { block, err := p.LightBlock(ctx, height) switch err { diff --git 
a/light/mbt/driver_test.go b/light/mbt/driver_test.go index bf6ab3d43..9dd9b04f6 100644 --- a/light/mbt/driver_test.go +++ b/light/mbt/driver_test.go @@ -10,7 +10,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/light" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" ) const jsonDir = "./json" @@ -101,10 +102,10 @@ type testCase struct { } type initialData struct { - SignedHeader types.SignedHeader `json:"signed_header"` - NextValidatorSet types.ValidatorSet `json:"next_validator_set"` - TrustingPeriod uint64 `json:"trusting_period"` - Now time.Time `json:"now"` + SignedHeader metadata.SignedHeader `json:"signed_header"` + NextValidatorSet consensus.ValidatorSet `json:"next_validator_set"` + TrustingPeriod uint64 `json:"trusting_period"` + Now time.Time `json:"now"` } type inputData struct { @@ -116,7 +117,7 @@ type inputData struct { // In tendermint-rs, NextValidatorSet is used to verify new blocks (opposite to // Go tendermint). type lightBlockWithNextValidatorSet struct { - *types.SignedHeader `json:"signed_header"` - ValidatorSet *types.ValidatorSet `json:"validator_set"` - NextValidatorSet *types.ValidatorSet `json:"next_validator_set"` + *metadata.SignedHeader `json:"signed_header"` + ValidatorSet *consensus.ValidatorSet `json:"validator_set"` + NextValidatorSet *consensus.ValidatorSet `json:"next_validator_set"` } diff --git a/light/provider/http/http.go b/light/provider/http/http.go index ceea0f6d2..42bc8c578 100644 --- a/light/provider/http/http.go +++ b/light/provider/http/http.go @@ -10,11 +10,14 @@ import ( "time" "github.com/tendermint/tendermint/light/provider" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + types "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" rpcclient "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) var defaultOptions = Options{ @@ -148,12 +151,12 @@ func (p *http) LightBlock(ctx context.Context, height int64) (*types.LightBlock, } // ReportEvidence calls `/broadcast_evidence` endpoint. -func (p *http) ReportEvidence(ctx context.Context, ev types.Evidence) error { +func (p *http) ReportEvidence(ctx context.Context, ev evidence.Evidence) error { _, err := p.client.BroadcastEvidence(ctx, ev) return err } -func (p *http) validatorSet(ctx context.Context, height *int64) (*types.ValidatorSet, error) { +func (p *http) validatorSet(ctx context.Context, height *int64) (*consensus.ValidatorSet, error) { // Since the malicious node could report a massive number of pages, making us // spend a considerable time iterating, we restrict the number of pages here. 
// => 10000 validators max @@ -161,7 +164,7 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato var ( perPage = 100 - vals = []*types.Validator{} + vals = []*consensus.Validator{} page = 1 total = -1 ) @@ -224,14 +227,14 @@ func (p *http) validatorSet(ctx context.Context, height *int64) (*types.Validato } } - valSet, err := types.ValidatorSetFromExistingValidators(vals) + valSet, err := consensus.ValidatorSetFromExistingValidators(vals) if err != nil { return nil, provider.ErrBadLightBlock{Reason: err} } return valSet, nil } -func (p *http) signedHeader(ctx context.Context, height *int64) (*types.SignedHeader, error) { +func (p *http) signedHeader(ctx context.Context, height *int64) (*metadata.SignedHeader, error) { // create a for loop to control retries. If p.maxRetryAttempts // is negative we will keep repeating. for attempt := uint16(0); attempt != p.maxRetryAttempts+1; attempt++ { diff --git a/light/provider/http/http_test.go b/light/provider/http/http_test.go index adcb69fb9..ca90fa80e 100644 --- a/light/provider/http/http_test.go +++ b/light/provider/http/http_test.go @@ -12,10 +12,10 @@ import ( "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/light/provider" lighthttp "github.com/tendermint/tendermint/light/provider/http" + "github.com/tendermint/tendermint/pkg/consensus" rpcclient "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" ) func TestNewProvider(t *testing.T) { @@ -44,7 +44,7 @@ func TestProvider(t *testing.T) { require.NoError(t, err) rpcAddr := cfg.RPC.ListenAddress - genDoc, err := types.GenesisDocFromFile(cfg.GenesisFile()) + genDoc, err := consensus.GenesisDocFromFile(cfg.GenesisFile()) require.NoError(t, err) chainID := genDoc.ChainID diff --git a/light/provider/mocks/provider.go b/light/provider/mocks/provider.go index aa36fa2d3..7214f9a75 100644 --- a/light/provider/mocks/provider.go +++ b/light/provider/mocks/provider.go @@ -5,9 +5,10 @@ package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + evidence "github.com/tendermint/tendermint/pkg/evidence" + light "github.com/tendermint/tendermint/pkg/light" - types "github.com/tendermint/tendermint/types" + mock "github.com/stretchr/testify/mock" ) // Provider is an autogenerated mock type for the Provider type @@ -16,15 +17,15 @@ type Provider struct { } // LightBlock provides a mock function with given fields: ctx, height -func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) { +func (_m *Provider) LightBlock(ctx context.Context, height int64) (*light.LightBlock, error) { ret := _m.Called(ctx, height) - var r0 *types.LightBlock - if rf, ok := ret.Get(0).(func(context.Context, int64) *types.LightBlock); ok { + var r0 *light.LightBlock + if rf, ok := ret.Get(0).(func(context.Context, int64) *light.LightBlock); ok { r0 = rf(ctx, height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.LightBlock) + r0 = ret.Get(0).(*light.LightBlock) } } @@ -39,11 +40,11 @@ func (_m *Provider) LightBlock(ctx context.Context, height int64) (*types.LightB } // ReportEvidence provides a mock function with given fields: _a0, _a1 -func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 types.Evidence) error { +func (_m *Provider) ReportEvidence(_a0 context.Context, _a1 evidence.Evidence) error { ret := _m.Called(_a0, _a1) var r0 error - if rf, 
ok := ret.Get(0).(func(context.Context, types.Evidence) error); ok { + if rf, ok := ret.Get(0).(func(context.Context, evidence.Evidence) error); ok { r0 = rf(_a0, _a1) } else { r0 = ret.Error(0) diff --git a/light/provider/provider.go b/light/provider/provider.go index 7f15d5c75..ff19b84be 100644 --- a/light/provider/provider.go +++ b/light/provider/provider.go @@ -3,7 +3,8 @@ package provider import ( "context" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/light" ) //go:generate ../../scripts/mockery_generate.sh Provider @@ -21,8 +22,8 @@ type Provider interface { // issues, an error will be returned. // If there's no LightBlock for the given height, ErrLightBlockNotFound // error is returned. - LightBlock(ctx context.Context, height int64) (*types.LightBlock, error) + LightBlock(ctx context.Context, height int64) (*light.LightBlock, error) // ReportEvidence reports an evidence of misbehavior. - ReportEvidence(context.Context, types.Evidence) error + ReportEvidence(context.Context, evidence.Evidence) error } diff --git a/light/proxy/routes.go b/light/proxy/routes.go index 62d70f545..e3c28d28b 100644 --- a/light/proxy/routes.go +++ b/light/proxy/routes.go @@ -3,11 +3,12 @@ package proxy import ( "github.com/tendermint/tendermint/libs/bytes" lrpc "github.com/tendermint/tendermint/light/rpc" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcserver "github.com/tendermint/tendermint/rpc/jsonrpc/server" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) func RPCRoutes(c *lrpc.Client) map[string]*rpcserver.RPCFunc { @@ -230,26 +231,26 @@ func makeNumUnconfirmedTxsFunc(c *lrpc.Client) rpcNumUnconfirmedTxsFunc { } } -type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) +type rpcBroadcastTxCommitFunc func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) func makeBroadcastTxCommitFunc(c *lrpc.Client) rpcBroadcastTxCommitFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { + return func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.BroadcastTxCommit(ctx.Context(), tx) } } -type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type rpcBroadcastTxSyncFunc func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) func makeBroadcastTxSyncFunc(c *lrpc.Client) rpcBroadcastTxSyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.BroadcastTxSync(ctx.Context(), tx) } } -type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) +type rpcBroadcastTxAsyncFunc func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) func makeBroadcastTxAsyncFunc(c *lrpc.Client) rpcBroadcastTxAsyncFunc { - return func(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { + return func(ctx *rpctypes.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.BroadcastTxAsync(ctx.Context(), tx) } } @@ -276,11 +277,11 @@ func makeABCIInfoFunc(c 
*lrpc.Client) rpcABCIInfoFunc { } } -type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) +type rpcBroadcastEvidenceFunc func(ctx *rpctypes.Context, ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) // nolint: interfacer func makeBroadcastEvidenceFunc(c *lrpc.Client) rpcBroadcastEvidenceFunc { - return func(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + return func(ctx *rpctypes.Context, ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.BroadcastEvidence(ctx.Context(), ev) } } diff --git a/light/rpc/client.go b/light/rpc/client.go index 48cf7ce73..b21d31b21 100644 --- a/light/rpc/client.go +++ b/light/rpc/client.go @@ -10,15 +10,17 @@ import ( "github.com/gogo/protobuf/proto" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/merkle" tmbytes "github.com/tendermint/tendermint/libs/bytes" tmmath "github.com/tendermint/tendermint/libs/math" service "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/evidence" + types "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/mempool" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // KeyPathFunc builds a merkle path out of the given path and key. @@ -188,15 +190,15 @@ func (c *Client) ABCIQueryWithOptions(ctx context.Context, path string, data tmb return &ctypes.ResultABCIQuery{Response: resp}, nil } -func (c *Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Client) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.next.BroadcastTxCommit(ctx, tx) } -func (c *Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.next.BroadcastTxAsync(ctx, tx) } -func (c *Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Client) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.next.BroadcastTxSync(ctx, tx) } @@ -208,7 +210,7 @@ func (c *Client) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirm return c.next.NumUnconfirmedTxs(ctx) } -func (c *Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Client) CheckTx(ctx context.Context, tx mempool.Tx) (*ctypes.ResultCheckTx, error) { return c.next.CheckTx(ctx, tx) } @@ -416,7 +418,7 @@ func (c *Client) BlockResults(ctx context.Context, height *int64) (*ctypes.Resul } // Build a Merkle tree of proto-encoded DeliverTx results and get a hash. - results := types.NewResults(res.TxsResults) + results := abci.NewResults(res.TxsResults) // proto-encode EndBlock events. 
ebeBytes, err := proto.Marshal(&abci.ResponseEndBlock{ @@ -525,7 +527,7 @@ func (c *Client) Validators( Total: totalCount}, nil } -func (c *Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Client) BroadcastEvidence(ctx context.Context, ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.next.BroadcastEvidence(ctx, ev) } diff --git a/light/rpc/mocks/light_client.go b/light/rpc/mocks/light_client.go index cc32cf649..188ff0242 100644 --- a/light/rpc/mocks/light_client.go +++ b/light/rpc/mocks/light_client.go @@ -6,10 +6,9 @@ import ( context "context" mock "github.com/stretchr/testify/mock" + light "github.com/tendermint/tendermint/pkg/light" time "time" - - types "github.com/tendermint/tendermint/types" ) // LightClient is an autogenerated mock type for the LightClient type @@ -32,15 +31,15 @@ func (_m *LightClient) ChainID() string { } // TrustedLightBlock provides a mock function with given fields: height -func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error) { +func (_m *LightClient) TrustedLightBlock(height int64) (*light.LightBlock, error) { ret := _m.Called(height) - var r0 *types.LightBlock - if rf, ok := ret.Get(0).(func(int64) *types.LightBlock); ok { + var r0 *light.LightBlock + if rf, ok := ret.Get(0).(func(int64) *light.LightBlock); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.LightBlock) + r0 = ret.Get(0).(*light.LightBlock) } } @@ -55,15 +54,15 @@ func (_m *LightClient) TrustedLightBlock(height int64) (*types.LightBlock, error } // Update provides a mock function with given fields: ctx, now -func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightBlock, error) { +func (_m *LightClient) Update(ctx context.Context, now time.Time) (*light.LightBlock, error) { ret := _m.Called(ctx, now) - var r0 *types.LightBlock - if rf, ok := ret.Get(0).(func(context.Context, time.Time) *types.LightBlock); ok { + var r0 *light.LightBlock + if rf, ok := ret.Get(0).(func(context.Context, time.Time) *light.LightBlock); ok { r0 = rf(ctx, now) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.LightBlock) + r0 = ret.Get(0).(*light.LightBlock) } } @@ -78,15 +77,15 @@ func (_m *LightClient) Update(ctx context.Context, now time.Time) (*types.LightB } // VerifyLightBlockAtHeight provides a mock function with given fields: ctx, height, now -func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*types.LightBlock, error) { +func (_m *LightClient) VerifyLightBlockAtHeight(ctx context.Context, height int64, now time.Time) (*light.LightBlock, error) { ret := _m.Called(ctx, height, now) - var r0 *types.LightBlock - if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) *types.LightBlock); ok { + var r0 *light.LightBlock + if rf, ok := ret.Get(0).(func(context.Context, int64, time.Time) *light.LightBlock); ok { r0 = rf(ctx, height, now) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.LightBlock) + r0 = ret.Get(0).(*light.LightBlock) } } diff --git a/light/store/db/db.go b/light/store/db/db.go index acfda1f79..ed50d9a34 100644 --- a/light/store/db/db.go +++ b/light/store/db/db.go @@ -9,8 +9,8 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/light/store" + "github.com/tendermint/tendermint/pkg/light" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) 
const ( @@ -45,7 +45,7 @@ func New(db dbm.DB) store.Store { // SaveLightBlock persists LightBlock to the db. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) SaveLightBlock(lb *types.LightBlock) error { +func (s *dbs) SaveLightBlock(lb *light.LightBlock) error { if lb.Height <= 0 { panic("negative or zero height") } @@ -110,7 +110,7 @@ func (s *dbs) DeleteLightBlock(height int64) error { // LightBlock retrieves the LightBlock at the given height. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) LightBlock(height int64) (*types.LightBlock, error) { +func (s *dbs) LightBlock(height int64) (*light.LightBlock, error) { if height <= 0 { panic("negative or zero height") } @@ -129,7 +129,7 @@ func (s *dbs) LightBlock(height int64) (*types.LightBlock, error) { return nil, fmt.Errorf("unmarshal error: %w", err) } - lightBlock, err := types.LightBlockFromProto(&lbpb) + lightBlock, err := light.LightBlockFromProto(&lbpb) if err != nil { return nil, fmt.Errorf("proto conversion error: %w", err) } @@ -181,7 +181,7 @@ func (s *dbs) FirstLightBlockHeight() (int64, error) { // the given height. It returns ErrLightBlockNotFound if no such block exists. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) { +func (s *dbs) LightBlockBefore(height int64) (*light.LightBlock, error) { if height <= 0 { panic("negative or zero height") } @@ -202,7 +202,7 @@ func (s *dbs) LightBlockBefore(height int64) (*types.LightBlock, error) { return nil, fmt.Errorf("unmarshal error: %w", err) } - lightBlock, err := types.LightBlockFromProto(&lbpb) + lightBlock, err := light.LightBlockFromProto(&lbpb) if err != nil { return nil, fmt.Errorf("proto conversion error: %w", err) } diff --git a/light/store/db/db_test.go b/light/store/db/db_test.go index b373d5126..495693419 100644 --- a/light/store/db/db_test.go +++ b/light/store/db/db_test.go @@ -14,7 +14,8 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/version" ) @@ -183,16 +184,16 @@ func Test_Concurrency(t *testing.T) { wg.Wait() } -func randLightBlock(height int64) *types.LightBlock { +func randLightBlock(height int64) *light.LightBlock { vals, _ := factory.RandValidatorSet(2, 1) - return &types.LightBlock{ - SignedHeader: &types.SignedHeader{ - Header: &types.Header{ + return &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ + Header: &metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 0}, ChainID: tmrand.Str(12), Height: height, Time: time.Now(), - LastBlockID: types.BlockID{}, + LastBlockID: metadata.BlockID{}, LastCommitHash: crypto.CRandBytes(tmhash.Size), DataHash: crypto.CRandBytes(tmhash.Size), ValidatorsHash: crypto.CRandBytes(tmhash.Size), @@ -203,7 +204,7 @@ func randLightBlock(height int64) *types.LightBlock { EvidenceHash: crypto.CRandBytes(tmhash.Size), ProposerAddress: crypto.CRandBytes(crypto.AddressSize), }, - Commit: &types.Commit{}, + Commit: &metadata.Commit{}, }, ValidatorSet: vals, } diff --git a/light/store/store.go b/light/store/store.go index 7c29f233d..9e3f07017 100644 --- a/light/store/store.go +++ b/light/store/store.go @@ -1,6 +1,6 @@ package store -import "github.com/tendermint/tendermint/types" +import 
"github.com/tendermint/tendermint/pkg/light" // Store is anything that can persistently store headers. type Store interface { @@ -8,7 +8,7 @@ type Store interface { // ValidatorSet (h: sh.Height). // // height must be > 0. - SaveLightBlock(lb *types.LightBlock) error + SaveLightBlock(lb *light.LightBlock) error // DeleteSignedHeaderAndValidatorSet deletes SignedHeader (h: height) and // ValidatorSet (h: height). @@ -22,7 +22,7 @@ type Store interface { // height must be > 0. // // If LightBlock is not found, ErrLightBlockNotFound is returned. - LightBlock(height int64) (*types.LightBlock, error) + LightBlock(height int64) (*light.LightBlock, error) // LastLightBlockHeight returns the last (newest) LightBlock height. // @@ -37,7 +37,7 @@ type Store interface { // LightBlockBefore returns the LightBlock before a certain height. // // height must be > 0 && <= LastLightBlockHeight. - LightBlockBefore(height int64) (*types.LightBlock, error) + LightBlockBefore(height int64) (*light.LightBlock, error) // Prune removes headers & the associated validator sets when Store reaches a // defined size (number of header & validator set pairs). diff --git a/light/verifier.go b/light/verifier.go index ee4bfb053..1c4da4d2f 100644 --- a/light/verifier.go +++ b/light/verifier.go @@ -7,7 +7,8 @@ import ( "time" tmmath "github.com/tendermint/tendermint/libs/math" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" ) var ( @@ -31,10 +32,10 @@ var ( // future. // trustedHeader must have a ChainID, Height and Time func VerifyNonAdjacent( - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *metadata.SignedHeader, // height=X + trustedVals *consensus.ValidatorSet, // height=X or height=X+1 + untrustedHeader *metadata.SignedHeader, // height=Y + untrustedVals *consensus.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration, @@ -67,7 +68,7 @@ func VerifyNonAdjacent( err := trustedVals.VerifyCommitLightTrusting(trustedHeader.ChainID, untrustedHeader.Commit, trustLevel) if err != nil { switch e := err.(type) { - case types.ErrNotEnoughVotingPowerSigned: + case consensus.ErrNotEnoughVotingPowerSigned: return ErrNewValSetCantBeTrusted{e} default: return ErrInvalidHeader{e} @@ -101,9 +102,9 @@ func VerifyNonAdjacent( // future. // trustedHeader must have a ChainID, Height, Time and NextValidatorsHash func VerifyAdjacent( - trustedHeader *types.SignedHeader, // height=X - untrustedHeader *types.SignedHeader, // height=X+1 - untrustedVals *types.ValidatorSet, // height=X+1 + trustedHeader *metadata.SignedHeader, // height=X + untrustedHeader *metadata.SignedHeader, // height=X+1 + untrustedVals *consensus.ValidatorSet, // height=X+1 trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration) error { @@ -150,10 +151,10 @@ func VerifyAdjacent( // Verify combines both VerifyAdjacent and VerifyNonAdjacent functions. 
func Verify( - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *metadata.SignedHeader, // height=X + trustedVals *consensus.ValidatorSet, // height=X or height=X+1 + untrustedHeader *metadata.SignedHeader, // height=Y + untrustedVals *consensus.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, maxClockDrift time.Duration, @@ -180,7 +181,7 @@ func ValidateTrustLevel(lvl tmmath.Fraction) error { } // HeaderExpired return true if the given header expired. -func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time.Time) bool { +func HeaderExpired(h *metadata.SignedHeader, trustingPeriod time.Duration, now time.Time) bool { expirationTime := h.Time.Add(trustingPeriod) return !expirationTime.After(now) } @@ -198,7 +199,7 @@ func HeaderExpired(h *types.SignedHeader, trustingPeriod time.Duration, now time // or not. These checks are not necessary because the detector never runs during // backwards verification and thus evidence that needs to be within a certain // time bound is never sent. -func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { +func VerifyBackwards(untrustedHeader, trustedHeader *metadata.Header) error { if err := untrustedHeader.ValidateBasic(); err != nil { return ErrInvalidHeader{err} } @@ -228,9 +229,9 @@ func VerifyBackwards(untrustedHeader, trustedHeader *types.Header) error { // NOTE: This function assumes that untrustedHeader is after trustedHeader. // Do not use for backwards verification. func verifyNewHeaderAndVals( - untrustedHeader *types.SignedHeader, - untrustedVals *types.ValidatorSet, - trustedHeader *types.SignedHeader, + untrustedHeader *metadata.SignedHeader, + untrustedVals *consensus.ValidatorSet, + trustedHeader *metadata.SignedHeader, now time.Time, maxClockDrift time.Duration) error { @@ -268,7 +269,7 @@ func verifyNewHeaderAndVals( return nil } -func checkRequiredHeaderFields(h *types.SignedHeader) { +func checkRequiredHeaderFields(h *metadata.SignedHeader) { if h.Height == 0 { panic("height in trusted header must be set (non zero") } diff --git a/light/verifier_test.go b/light/verifier_test.go index 0432c130d..cb0b70bf3 100644 --- a/light/verifier_test.go +++ b/light/verifier_test.go @@ -9,7 +9,8 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/light" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" ) const ( @@ -33,8 +34,8 @@ func TestVerifyAdjacentHeaders(t *testing.T) { ) testCases := []struct { - newHeader *types.SignedHeader - newVals *types.ValidatorSet + newHeader *metadata.SignedHeader + newVals *consensus.ValidatorSet trustingPeriod time.Duration now time.Time expErr error @@ -117,7 +118,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - light.ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + light.ErrInvalidHeader{Reason: consensus.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // vals does not match with what we have -> error @@ -197,8 +198,8 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { ) testCases := []struct { - newHeader *types.SignedHeader - newVals *types.ValidatorSet + newHeader *metadata.SignedHeader + newVals *consensus.ValidatorSet trustingPeriod 
time.Duration now time.Time expErr error @@ -231,7 +232,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - light.ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + light.ErrInvalidHeader{consensus.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // 3/3 new vals signed, 2/3 old vals present -> no error @@ -261,7 +262,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { lessThanOneThirdVals, 3 * time.Hour, bTime.Add(2 * time.Hour), - light.ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, + light.ErrNewValSetCantBeTrusted{consensus.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, "", }, } diff --git a/node/node.go b/node/node.go index ced8af729..089cd4716 100644 --- a/node/node.go +++ b/node/node.go @@ -14,7 +14,6 @@ import ( "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/cors" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" cs "github.com/tendermint/tendermint/internal/consensus" @@ -30,6 +29,10 @@ import ( "github.com/tendermint/tendermint/libs/strings" tmtime "github.com/tendermint/tendermint/libs/time" "github.com/tendermint/tendermint/light" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" tmgrpc "github.com/tendermint/tendermint/privval/grpc" "github.com/tendermint/tendermint/proxy" @@ -39,7 +42,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) // nodeImpl is the highest level interface to a full Tendermint node. @@ -49,8 +51,8 @@ type nodeImpl struct { // config config *cfg.Config - genesisDoc *types.GenesisDoc // initial validator set - privValidator types.PrivValidator // local node's validator key + genesisDoc *consensus.GenesisDoc // initial validator set + privValidator consensus.PrivValidator // local node's validator key // network transport *p2p.MConnTransport @@ -58,12 +60,12 @@ type nodeImpl struct { peerManager *p2p.PeerManager router *p2p.Router addrBook pex.AddrBook // known peers - nodeInfo types.NodeInfo - nodeKey types.NodeKey // our node privkey + nodeInfo p2ptypes.NodeInfo + nodeKey p2ptypes.NodeKey // our node privkey isListening bool // services - eventBus *types.EventBus // pub/sub for services + eventBus *events.EventBus // pub/sub for services stateStore sm.Store blockStore *store.BlockStore // store the blockchain to disk bcReactor service.Service // for block-syncing @@ -88,7 +90,7 @@ type nodeImpl struct { // PrivValidator, ClientCreator, GenesisDoc, and DBProvider. // It implements NodeProvider. func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, error) { - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) + nodeKey, err := p2ptypes.LoadOrGenNodeKey(config.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", config.NodeKeyFile(), err) } @@ -124,8 +126,8 @@ func newDefaultNode(config *cfg.Config, logger log.Logger) (service.Service, err // makeNode returns a new, ready to go, Tendermint Node. 
func makeNode(config *cfg.Config, - privValidator types.PrivValidator, - nodeKey types.NodeKey, + privValidator consensus.PrivValidator, + nodeKey p2ptypes.NodeKey, clientCreator proxy.ClientCreator, genesisDocProvider genesisDocProvider, dbProvider cfg.DBProvider, @@ -459,7 +461,7 @@ func makeNode(config *cfg.Config, // makeSeedNode returns a new seed node, containing only the p2p and pex reactors func makeSeedNode(config *cfg.Config, dbProvider cfg.DBProvider, - nodeKey types.NodeKey, + nodeKey p2ptypes.NodeKey, genesisDocProvider genesisDocProvider, logger log.Logger, ) (service.Service, error) { @@ -586,7 +588,7 @@ func (n *nodeImpl) OnStart() error { } // Start the transport. - addr, err := types.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) + addr, err := p2ptypes.NewNetAddressString(n.nodeKey.ID.AddressString(n.config.P2P.ListenAddress)) if err != nil { return err } @@ -986,18 +988,18 @@ func (n *nodeImpl) EvidencePool() *evidence.Pool { } // EventBus returns the Node's EventBus. -func (n *nodeImpl) EventBus() *types.EventBus { +func (n *nodeImpl) EventBus() *events.EventBus { return n.eventBus } // PrivValidator returns the Node's PrivValidator. // XXX: for convenience only! -func (n *nodeImpl) PrivValidator() types.PrivValidator { +func (n *nodeImpl) PrivValidator() consensus.PrivValidator { return n.privValidator } // GenesisDoc returns the Node's GenesisDoc. -func (n *nodeImpl) GenesisDoc() *types.GenesisDoc { +func (n *nodeImpl) GenesisDoc() *consensus.GenesisDoc { return n.genesisDoc } @@ -1029,7 +1031,7 @@ func (n *nodeImpl) IsListening() bool { } // NodeInfo returns the Node's Info from the Switch. -func (n *nodeImpl) NodeInfo() types.NodeInfo { +func (n *nodeImpl) NodeInfo() p2ptypes.NodeInfo { return n.nodeInfo } @@ -1042,7 +1044,7 @@ func startStateSync( config *cfg.StateSyncConfig, blockSync bool, stateInitHeight int64, - eb *types.EventBus, + eb *events.EventBus, ) error { stateSyncLogger := eb.Logger.With("module", "statesync") @@ -1050,7 +1052,7 @@ func startStateSync( // at the beginning of state sync, we use the initialHeight as the event height // because state sync doesn't have the concrete state height until the snapshot has been fetched. - d := types.EventDataStateSyncStatus{Complete: false, Height: stateInitHeight} + d := events.EventDataStateSyncStatus{Complete: false, Height: stateInitHeight} if err := eb.PublishEventStateSyncStatus(d); err != nil { stateSyncLogger.Error("failed to emit the statesync start event", "err", err) } @@ -1069,7 +1071,7 @@ func startStateSync( conR.SetStateSyncingMetrics(0) - d := types.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} + d := events.EventDataStateSyncStatus{Complete: true, Height: state.LastBlockHeight} if err := eb.PublishEventStateSyncStatus(d); err != nil { stateSyncLogger.Error("failed to emit the statesync start event", "err", err) } @@ -1082,7 +1084,7 @@ func startStateSync( return } - d := types.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} + d := events.EventDataBlockSyncStatus{Complete: false, Height: state.LastBlockHeight} if err := eb.PublishEventBlockSyncStatus(d); err != nil { stateSyncLogger.Error("failed to emit the block sync starting event", "err", err) } @@ -1097,13 +1099,13 @@ func startStateSync( // genesisDocProvider returns a GenesisDoc. // It allows the GenesisDoc to be pulled from sources other than the // filesystem, for instance from a distributed key-value store cluster.
-type genesisDocProvider func() (*types.GenesisDoc, error) +type genesisDocProvider func() (*consensus.GenesisDoc, error) // defaultGenesisDocProviderFunc returns a GenesisDocProvider that loads // the GenesisDoc from the config.GenesisFile() on the filesystem. func defaultGenesisDocProviderFunc(config *cfg.Config) genesisDocProvider { - return func() (*types.GenesisDoc, error) { - return types.GenesisDocFromFile(config.GenesisFile()) + return func() (*consensus.GenesisDoc, error) { + return consensus.GenesisDocFromFile(config.GenesisFile()) } } @@ -1131,7 +1133,7 @@ func defaultMetricsProvider(config *cfg.InstrumentationConfig) metricsProvider { // returns the genesis doc loaded through the given provider. func loadStateFromDBOrGenesisDocProvider( stateStore sm.Store, - genDoc *types.GenesisDoc, + genDoc *consensus.GenesisDoc, ) (sm.State, error) { // 1. Attempt to load state from the database @@ -1155,7 +1157,7 @@ func createAndStartPrivValidatorSocketClient( listenAddr, chainID string, logger log.Logger, -) (types.PrivValidator, error) { +) (consensus.PrivValidator, error) { pve, err := privval.NewSignerListener(listenAddr, logger) if err != nil { @@ -1186,7 +1188,7 @@ func createAndStartPrivValidatorGRPCClient( config *cfg.Config, chainID string, logger log.Logger, -) (types.PrivValidator, error) { +) (consensus.PrivValidator, error) { pvsc, err := tmgrpc.DialRemoteSigner( config.PrivValidator, chainID, @@ -1216,7 +1218,7 @@ func getRouterConfig(conf *cfg.Config, proxyApp proxy.AppConns) p2p.RouterOption } if conf.FilterPeers && proxyApp != nil { - opts.FilterPeerByID = func(ctx context.Context, id types.NodeID) error { + opts.FilterPeerByID = func(ctx context.Context, id p2ptypes.NodeID) error { res, err := proxyApp.Query().QuerySync(context.Background(), abci.RequestQuery{ Path: fmt.Sprintf("/p2p/filter/id/%s", id), }) diff --git a/node/node_test.go b/node/node_test.go index 16edb4210..7a3ec1062 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -22,23 +22,27 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" consmocks "github.com/tendermint/tendermint/internal/consensus/mocks" - ssmocks "github.com/tendermint/tendermint/internal/statesync/mocks" - "github.com/tendermint/tendermint/internal/evidence" "github.com/tendermint/tendermint/internal/mempool" mempoolv0 "github.com/tendermint/tendermint/internal/mempool/v0" statesync "github.com/tendermint/tendermint/internal/statesync" + ssmocks "github.com/tendermint/tendermint/internal/statesync/mocks" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + evtypes "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" ) func TestNodeStartStop(t *testing.T) { @@ -56,7 +60,7 @@ func TestNodeStartStop(t *testing.T) { t.Logf("Started node %v", n.sw.NodeInfo()) // wait for the
node to produce a block - blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock) + blocksSub, err := n.EventBus().Subscribe(context.Background(), "node_test", events.EventQueryNewBlock) require.NoError(t, err) select { case <-blocksSub.Out(): @@ -148,7 +152,7 @@ func TestNodeSetPrivValTCP(t *testing.T) { signerServer := privval.NewSignerServer( dialerEndpoint, config.ChainID(), - types.NewMockPV(), + consensus.NewMockPV(), ) go func() { @@ -193,7 +197,7 @@ func TestNodeSetPrivValIPC(t *testing.T) { pvsc := privval.NewSignerServer( dialerEndpoint, config.ChainID(), - types.NewMockPV(), + consensus.NewMockPV(), ) go func() { @@ -257,14 +261,14 @@ func TestCreateProposalBlock(t *testing.T) { // than can fit in a block var currentBytes int64 = 0 for currentBytes <= maxEvidenceBytes { - ev := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") + ev := evtypes.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[0], "test-chain") currentBytes += int64(len(ev.Bytes())) evidencePool.ReportConflictingVotes(ev.VoteA, ev.VoteB) } evList, size := evidencePool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) require.Less(t, size, state.ConsensusParams.Evidence.MaxBytes+1) - evData := &types.EvidenceData{Evidence: evList} + evData := &block.EvidenceData{Evidence: evList} require.EqualValues(t, size, evData.ByteSize()) // fill the mempool with more txs @@ -285,7 +289,7 @@ func TestCreateProposalBlock(t *testing.T) { blockStore, ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + commit := metadata.NewCommit(height-1, 0, metadata.BlockID{}, nil) block, _ := blockExec.CreateProposalBlock( height, state, commit, @@ -296,7 +300,7 @@ func TestCreateProposalBlock(t *testing.T) { partSet := block.MakePartSet(partSize) assert.Less(t, partSet.ByteSize(), int64(maxBytes)) - partSetFromHeader := types.NewPartSetFromHeader(partSet.Header()) + partSetFromHeader := metadata.NewPartSetFromHeader(partSet.Header()) for partSetFromHeader.Count() < partSetFromHeader.Total() { added, err := partSetFromHeader.AddPart(partSet.GetPart(int(partSetFromHeader.Count()))) require.NoError(t, err) @@ -340,7 +344,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size - txLength := int(types.MaxDataBytesNoEvidence(maxBytes, 1)) + txLength := int(block.MaxDataBytesNoEvidence(maxBytes, 1)) tx := tmrand.Bytes(txLength - 4) // to account for the varint err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) assert.NoError(t, err) @@ -354,7 +358,7 @@ func TestMaxTxsProposalBlockSize(t *testing.T) { blockStore, ) - commit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + commit := metadata.NewCommit(height-1, 0, metadata.BlockID{}, nil) block, _ := blockExec.CreateProposalBlock( height, state, commit, @@ -381,7 +385,7 @@ func TestMaxProposalBlockSize(t *testing.T) { logger := log.TestingLogger() - state, stateDB, _ := state(types.MaxVotesCount, int64(1)) + state, stateDB, _ := state(consensus.MaxVotesCount, int64(1)) stateStore := sm.NewStore(stateDB) blockStore := store.NewBlockStore(dbm.NewMemDB()) const maxBytes int64 = 1024 * 1024 * 2 @@ -400,7 +404,7 @@ func TestMaxProposalBlockSize(t *testing.T) { mp.SetLogger(logger) // fill the mempool with one txs just below the maximum size - txLength := int(types.MaxDataBytesNoEvidence(maxBytes, types.MaxVotesCount)) + txLength := int(block.MaxDataBytesNoEvidence(maxBytes, 
consensus.MaxVotesCount)) tx := tmrand.Bytes(txLength - 6) // to account for the varint err = mp.CheckTx(context.Background(), tx, nil, mempool.TxInfo{}) assert.NoError(t, err) @@ -421,9 +425,9 @@ func TestMaxProposalBlockSize(t *testing.T) { blockStore, ) - blockID := types.BlockID{ + blockID := metadata.BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: math.MaxInt32, Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, @@ -439,26 +443,26 @@ func TestMaxProposalBlockSize(t *testing.T) { state.Version.Consensus.Block = math.MaxInt64 state.Version.Consensus.App = math.MaxInt64 maxChainID := "" - for i := 0; i < types.MaxChainIDLen; i++ { + for i := 0; i < metadata.MaxChainIDLen; i++ { maxChainID += "𠜎" } state.ChainID = maxChainID - cs := types.CommitSig{ - BlockIDFlag: types.BlockIDFlagNil, + cs := metadata.CommitSig{ + BlockIDFlag: metadata.BlockIDFlagNil, ValidatorAddress: crypto.AddressHash([]byte("validator_address")), Timestamp: timestamp, - Signature: crypto.CRandBytes(types.MaxSignatureSize), + Signature: crypto.CRandBytes(metadata.MaxSignatureSize), } - commit := &types.Commit{ + commit := &metadata.Commit{ Height: math.MaxInt64, Round: math.MaxInt32, BlockID: blockID, } // add maximum amount of signatures to a single commit - for i := 0; i < types.MaxVotesCount; i++ { + for i := 0; i < consensus.MaxVotesCount; i++ { commit.Signatures = append(commit.Signatures, cs) } @@ -475,8 +479,8 @@ func TestMaxProposalBlockSize(t *testing.T) { require.NoError(t, err) // require that the header and commit be the max possible size - require.Equal(t, int64(pb.Header.Size()), types.MaxHeaderBytes) - require.Equal(t, int64(pb.LastCommit.Size()), types.MaxCommitBytes(types.MaxVotesCount)) + require.Equal(t, int64(pb.Header.Size()), metadata.MaxHeaderBytes) + require.Equal(t, int64(pb.LastCommit.Size()), metadata.MaxCommitBytes(consensus.MaxVotesCount)) // make sure that the block is less than the max possible size assert.Equal(t, int64(pb.Size()), maxBytes) // because of the proto overhead we expect the part set bytes to be equal or @@ -490,7 +494,7 @@ func TestNodeNewSeedNode(t *testing.T) { config.Mode = cfg.ModeSeed defer os.RemoveAll(config.RootDir) - nodeKey, err := types.LoadOrGenNodeKey(config.NodeKeyFile()) + nodeKey, err := p2ptypes.LoadOrGenNodeKey(config.NodeKeyFile()) require.NoError(t, err) ns, err := makeSeedNode(config, @@ -594,20 +598,20 @@ func TestNodeSetEventSink(t *testing.T) { assert.Equal(t, e, err) } -func state(nVals int, height int64) (sm.State, dbm.DB, []types.PrivValidator) { - privVals := make([]types.PrivValidator, nVals) - vals := make([]types.GenesisValidator, nVals) +func state(nVals int, height int64) (sm.State, dbm.DB, []consensus.PrivValidator) { + privVals := make([]consensus.PrivValidator, nVals) + vals := make([]consensus.GenesisValidator, nVals) for i := 0; i < nVals; i++ { - privVal := types.NewMockPV() + privVal := consensus.NewMockPV() privVals[i] = privVal - vals[i] = types.GenesisValidator{ + vals[i] = consensus.GenesisValidator{ Address: privVal.PrivKey.PubKey().Address(), PubKey: privVal.PrivKey.PubKey(), Power: 1000, Name: fmt.Sprintf("test%d", i), } } - s, _ := sm.MakeGenesisState(&types.GenesisDoc{ + s, _ := sm.MakeGenesisState(&consensus.GenesisDoc{ ChainID: "test-chain", Validators: vals, AppHash: nil, @@ -674,7 +678,7 @@ func TestNodeStartStateSync(t *testing.T) { require.NoError(t, err) require.NotNil(t, eventBus) - sub, err := 
eventBus.Subscribe(context.Background(), "test-client", types.EventQueryStateSyncStatus, 10) + sub, err := eventBus.Subscribe(context.Background(), "test-client", events.EventQueryStateSyncStatus, 10) require.NoError(t, err) require.NotNil(t, sub) @@ -712,7 +716,7 @@ func TestNodeStartStateSync(t *testing.T) { func ensureStateSyncStatus(t *testing.T, msg tmpubsub.Message, complete bool, height int64) { t.Helper() - status, ok := msg.Data().(types.EventDataStateSyncStatus) + status, ok := msg.Data().(events.EventDataStateSyncStatus) require.True(t, ok) require.Equal(t, complete, status.Complete) diff --git a/node/public.go b/node/public.go index 99a8226d0..6c6ecaa14 100644 --- a/node/public.go +++ b/node/public.go @@ -7,9 +7,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) // NewDefault constructs a tendermint node service for use in go @@ -29,9 +30,9 @@ func NewDefault(conf *config.Config, logger log.Logger) (service.Service, error) func New(conf *config.Config, logger log.Logger, cf proxy.ClientCreator, - gen *types.GenesisDoc, + gen *consensus.GenesisDoc, ) (service.Service, error) { - nodeKey, err := types.LoadOrGenNodeKey(conf.NodeKeyFile()) + nodeKey, err := p2p.LoadOrGenNodeKey(conf.NodeKeyFile()) if err != nil { return nil, fmt.Errorf("failed to load or gen node key %s: %w", conf.NodeKeyFile(), err) } @@ -41,7 +42,7 @@ func New(conf *config.Config, case nil: genProvider = defaultGenesisDocProviderFunc(conf) default: - genProvider = func() (*types.GenesisDoc, error) { return gen, nil } + genProvider = func() (*consensus.GenesisDoc, error) { return gen, nil } } switch conf.Mode { diff --git a/node/setup.go b/node/setup.go index af48fb382..34332af55 100644 --- a/node/setup.go +++ b/node/setup.go @@ -13,7 +13,6 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" bcv0 "github.com/tendermint/tendermint/internal/blocksync/v0" @@ -29,6 +28,10 @@ import ( "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" tmstrings "github.com/tendermint/tendermint/libs/strings" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" protop2p "github.com/tendermint/tendermint/proto/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" @@ -37,7 +40,6 @@ import ( null "github.com/tendermint/tendermint/state/indexer/sink/null" psql "github.com/tendermint/tendermint/state/indexer/sink/psql" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -62,8 +64,8 @@ func createAndStartProxyAppConns(clientCreator proxy.ClientCreator, logger log.L return proxyApp, nil } -func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { - eventBus := types.NewEventBus() +func createAndStartEventBus(logger log.Logger) (*events.EventBus, error) { + eventBus := events.NewEventBus() eventBus.SetLogger(logger.With("module", "events")) if err := eventBus.Start(); err 
!= nil { return nil, err @@ -74,7 +76,7 @@ func createAndStartEventBus(logger log.Logger) (*types.EventBus, error) { func createAndStartIndexerService( config *cfg.Config, dbProvider cfg.DBProvider, - eventBus *types.EventBus, + eventBus *events.EventBus, logger log.Logger, chainID string, ) (*indexer.Service, []indexer.EventSink, error) { @@ -144,8 +146,8 @@ func doHandshake( stateStore sm.Store, state sm.State, blockStore sm.BlockStore, - genDoc *types.GenesisDoc, - eventBus types.BlockEventPublisher, + genDoc *consensus.GenesisDoc, + eventBus events.BlockEventPublisher, proxyApp proxy.AppConns, consensusLogger log.Logger) error { @@ -386,10 +388,10 @@ func createConsensusReactor( blockStore sm.BlockStore, mp mempool.Mempool, evidencePool *evidence.Pool, - privValidator types.PrivValidator, + privValidator consensus.PrivValidator, csMetrics *cs.Metrics, waitSync bool, - eventBus *types.EventBus, + eventBus *events.EventBus, peerManager *p2p.PeerManager, router *p2p.Router, logger log.Logger, @@ -458,7 +460,7 @@ func createPeerManager( config *cfg.Config, dbProvider cfg.DBProvider, p2pLogger log.Logger, - nodeID types.NodeID, + nodeID p2ptypes.NodeID, ) (*p2p.PeerManager, error) { var maxConns uint16 @@ -484,9 +486,9 @@ func createPeerManager( maxConns = 64 } - privatePeerIDs := make(map[types.NodeID]struct{}) + privatePeerIDs := make(map[p2ptypes.NodeID]struct{}) for _, id := range tmstrings.SplitAndTrimEmpty(config.P2P.PrivatePeerIDs, ",", " ") { - privatePeerIDs[types.NodeID(id)] = struct{}{} + privatePeerIDs[p2ptypes.NodeID(id)] = struct{}{} } options := p2p.PeerManagerOptions{ @@ -541,7 +543,7 @@ func createPeerManager( func createRouter( p2pLogger log.Logger, p2pMetrics *p2p.Metrics, - nodeInfo types.NodeInfo, + nodeInfo p2ptypes.NodeInfo, privKey crypto.PrivKey, peerManager *p2p.PeerManager, transport p2p.Transport, @@ -569,8 +571,8 @@ func createSwitch( consensusReactor *p2p.ReactorShim, evidenceReactor *p2p.ReactorShim, proxyApp proxy.AppConns, - nodeInfo types.NodeInfo, - nodeKey types.NodeKey, + nodeInfo p2ptypes.NodeInfo, + nodeKey p2ptypes.NodeKey, p2pLogger log.Logger, ) *p2p.Switch { @@ -648,21 +650,21 @@ func createSwitch( } func createAddrBookAndSetOnSwitch(config *cfg.Config, sw *p2p.Switch, - p2pLogger log.Logger, nodeKey types.NodeKey) (pex.AddrBook, error) { + p2pLogger log.Logger, nodeKey p2ptypes.NodeKey) (pex.AddrBook, error) { addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict) addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile())) // Add ourselves to addrbook to prevent dialing ourselves if config.P2P.ExternalAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress)) + addr, err := p2ptypes.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ExternalAddress)) if err != nil { return nil, fmt.Errorf("p2p.external_address is incorrect: %w", err) } addrBook.AddOurAddress(addr) } if config.P2P.ListenAddress != "" { - addr, err := types.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress)) + addr, err := p2ptypes.NewNetAddressString(nodeKey.ID.AddressString(config.P2P.ListenAddress)) if err != nil { return nil, fmt.Errorf("p2p.laddr is incorrect: %w", err) } @@ -713,11 +715,11 @@ func createPEXReactorV2( func makeNodeInfo( config *cfg.Config, - nodeKey types.NodeKey, + nodeKey p2ptypes.NodeKey, eventSinks []indexer.EventSink, - genDoc *types.GenesisDoc, + genDoc *consensus.GenesisDoc, state sm.State, -) (types.NodeInfo, error) { +) (p2ptypes.NodeInfo, 
error) { txIndexerStatus := "off" if indexer.IndexingEnabled(eventSinks) { @@ -733,11 +735,11 @@ func makeNodeInfo( bcChannel = bcv2.BlockchainChannel default: - return types.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version) + return p2ptypes.NodeInfo{}, fmt.Errorf("unknown blocksync version %s", config.BlockSync.Version) } - nodeInfo := types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ + nodeInfo := p2ptypes.NodeInfo{ + ProtocolVersion: p2ptypes.ProtocolVersion{ P2P: version.P2PProtocol, // global Block: state.Version.Consensus.Block, App: state.Version.Consensus.App, @@ -758,7 +760,7 @@ func makeNodeInfo( byte(statesync.LightBlockChannel), }, Moniker: config.Moniker, - Other: types.NodeInfoOther{ + Other: p2ptypes.NodeInfoOther{ TxIndex: txIndexerStatus, RPCAddress: config.RPC.ListenAddress, }, @@ -782,12 +784,12 @@ func makeNodeInfo( func makeSeedNodeInfo( config *cfg.Config, - nodeKey types.NodeKey, - genDoc *types.GenesisDoc, + nodeKey p2ptypes.NodeKey, + genDoc *consensus.GenesisDoc, state sm.State, -) (types.NodeInfo, error) { - nodeInfo := types.NodeInfo{ - ProtocolVersion: types.ProtocolVersion{ +) (p2ptypes.NodeInfo, error) { + nodeInfo := p2ptypes.NodeInfo{ + ProtocolVersion: p2ptypes.ProtocolVersion{ P2P: version.P2PProtocol, // global Block: state.Version.Consensus.Block, App: state.Version.Consensus.App, @@ -797,7 +799,7 @@ func makeSeedNodeInfo( Version: version.TMVersion, Channels: []byte{}, Moniker: config.Moniker, - Other: types.NodeInfoOther{ + Other: p2ptypes.NodeInfoOther{ TxIndex: "off", RPCAddress: config.RPC.ListenAddress, }, diff --git a/privval/file.go b/privval/file.go index 4ec918c70..38cf2c75c 100644 --- a/privval/file.go +++ b/privval/file.go @@ -19,8 +19,8 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" tmos "github.com/tendermint/tendermint/libs/os" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // TODO: type ? @@ -47,7 +47,7 @@ func voteToStep(vote *tmproto.Vote) int8 { // FilePVKey stores the immutable part of PrivValidator. type FilePVKey struct { - Address types.Address `json:"address"` + Address crypto.Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` PrivKey crypto.PrivKey `json:"priv_key"` @@ -154,7 +154,7 @@ type FilePV struct { LastSignState FilePVLastSignState } -var _ types.PrivValidator = (*FilePV)(nil) +var _ consensus.PrivValidator = (*FilePV)(nil) // NewFilePV generates a new validator from the given key and paths. func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FilePV { @@ -176,9 +176,9 @@ func NewFilePV(privKey crypto.PrivKey, keyFilePath, stateFilePath string) *FileP // and sets the filePaths, but does not call Save(). func GenFilePV(keyFilePath, stateFilePath, keyType string) (*FilePV, error) { switch keyType { - case types.ABCIPubKeyTypeSecp256k1: + case consensus.ABCIPubKeyTypeSecp256k1: return NewFilePV(secp256k1.GenPrivKey(), keyFilePath, stateFilePath), nil - case "", types.ABCIPubKeyTypeEd25519: + case "", consensus.ABCIPubKeyTypeEd25519: return NewFilePV(ed25519.GenPrivKey(), keyFilePath, stateFilePath), nil default: return nil, fmt.Errorf("key type: %s is not supported", keyType) @@ -254,7 +254,7 @@ func LoadOrGenFilePV(keyFilePath, stateFilePath string) (*FilePV, error) { // GetAddress returns the address of the validator. // Implements PrivValidator. 
-func (pv *FilePV) GetAddress() types.Address { +func (pv *FilePV) GetAddress() consensus.Address { return pv.Key.Address } @@ -326,7 +326,7 @@ func (pv *FilePV) signVote(chainID string, vote *tmproto.Vote) error { return err } - signBytes := types.VoteSignBytes(chainID, vote) + signBytes := consensus.VoteSignBytes(chainID, vote) // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. @@ -368,7 +368,7 @@ func (pv *FilePV) signProposal(chainID string, proposal *tmproto.Proposal) error return err } - signBytes := types.ProposalSignBytes(chainID, proposal) + signBytes := consensus.ProposalSignBytes(chainID, proposal) // We might crash before writing to the wal, // causing us to try to re-sign for the same HRS. diff --git a/privval/file_test.go b/privval/file_test.go index 680428ac2..aee2ec7e7 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -17,8 +17,9 @@ import ( tmjson "github.com/tendermint/tendermint/libs/json" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func TestGenLoadValidator(t *testing.T) { @@ -60,7 +61,7 @@ func TestResetValidator(t *testing.T) { height, round := int64(10), int32(1) voteType := tmproto.PrevoteType randBytes := tmrand.Bytes(tmhash.Size) - blockID := types.BlockID{Hash: randBytes, PartSetHeader: types.PartSetHeader{}} + blockID := metadata.BlockID{Hash: randBytes, PartSetHeader: metadata.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) err = privVal.SignVote(context.Background(), "mychainid", vote.ToProto()) assert.NoError(t, err, "expected no error signing vote") @@ -176,10 +177,10 @@ func TestSignVote(t *testing.T) { randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + block1 := metadata.BlockID{Hash: randbytes, + PartSetHeader: metadata.PartSetHeader{Total: 5, Hash: randbytes}} + block2 := metadata.BlockID{Hash: randbytes2, + PartSetHeader: metadata.PartSetHeader{Total: 10, Hash: randbytes2}} height, round := int64(10), int32(1) voteType := tmproto.PrevoteType @@ -195,7 +196,7 @@ func TestSignVote(t *testing.T) { assert.NoError(err, "expected no error on signing same vote") // now try some bad votes - cases := []*types.Vote{ + cases := []*consensus.Vote{ newVote(privVal.Key.Address, 0, height, round-1, voteType, block1), // round regression newVote(privVal.Key.Address, 0, height-1, round, voteType, block1), // height regression newVote(privVal.Key.Address, 0, height-2, round+4, voteType, block1), // height regression and different round @@ -230,10 +231,10 @@ func TestSignProposal(t *testing.T) { randbytes := tmrand.Bytes(tmhash.Size) randbytes2 := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, - PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} - block2 := types.BlockID{Hash: randbytes2, - PartSetHeader: types.PartSetHeader{Total: 10, Hash: randbytes2}} + block1 := metadata.BlockID{Hash: randbytes, + PartSetHeader: metadata.PartSetHeader{Total: 5, Hash: randbytes}} + block2 := metadata.BlockID{Hash: randbytes2, + PartSetHeader: 
metadata.PartSetHeader{Total: 10, Hash: randbytes2}} height, round := int64(10), int32(1) // sign a proposal for first time @@ -247,7 +248,7 @@ func TestSignProposal(t *testing.T) { assert.NoError(err, "expected no error on signing same proposal") // now try some bad Proposals - cases := []*types.Proposal{ + cases := []*consensus.Proposal{ newProposal(height, round-1, block1), // round regression newProposal(height-1, round, block1), // height regression newProposal(height-2, round+4, block1), // height regression and different round @@ -276,7 +277,7 @@ func TestDifferByTimestamp(t *testing.T) { privVal, err := GenFilePV(tempKeyFile.Name(), tempStateFile.Name(), "") require.NoError(t, err) randbytes := tmrand.Bytes(tmhash.Size) - block1 := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{Total: 5, Hash: randbytes}} + block1 := metadata.BlockID{Hash: randbytes, PartSetHeader: metadata.PartSetHeader{Total: 5, Hash: randbytes}} height, round := int64(10), int32(1) chainID := "mychainid" @@ -286,7 +287,7 @@ func TestDifferByTimestamp(t *testing.T) { pb := proposal.ToProto() err := privVal.SignProposal(context.Background(), chainID, pb) assert.NoError(t, err, "expected no error signing proposal") - signBytes := types.ProposalSignBytes(chainID, pb) + signBytes := consensus.ProposalSignBytes(chainID, pb) sig := proposal.Signature timeStamp := proposal.Timestamp @@ -299,20 +300,20 @@ func TestDifferByTimestamp(t *testing.T) { assert.NoError(t, err, "expected no error on signing same proposal") assert.Equal(t, timeStamp, pb.Timestamp) - assert.Equal(t, signBytes, types.ProposalSignBytes(chainID, pb)) + assert.Equal(t, signBytes, consensus.ProposalSignBytes(chainID, pb)) assert.Equal(t, sig, proposal.Signature) } // test vote { voteType := tmproto.PrevoteType - blockID := types.BlockID{Hash: randbytes, PartSetHeader: types.PartSetHeader{}} + blockID := metadata.BlockID{Hash: randbytes, PartSetHeader: metadata.PartSetHeader{}} vote := newVote(privVal.Key.Address, 0, height, round, voteType, blockID) v := vote.ToProto() err := privVal.SignVote(context.Background(), "mychainid", v) assert.NoError(t, err, "expected no error signing vote") - signBytes := types.VoteSignBytes(chainID, v) + signBytes := consensus.VoteSignBytes(chainID, v) sig := v.Signature timeStamp := vote.Timestamp @@ -324,14 +325,14 @@ func TestDifferByTimestamp(t *testing.T) { assert.NoError(t, err, "expected no error on signing same vote") assert.Equal(t, timeStamp, v.Timestamp) - assert.Equal(t, signBytes, types.VoteSignBytes(chainID, v)) + assert.Equal(t, signBytes, consensus.VoteSignBytes(chainID, v)) assert.Equal(t, sig, v.Signature) } } -func newVote(addr types.Address, idx int32, height int64, round int32, - typ tmproto.SignedMsgType, blockID types.BlockID) *types.Vote { - return &types.Vote{ +func newVote(addr consensus.Address, idx int32, height int64, round int32, + typ tmproto.SignedMsgType, blockID metadata.BlockID) *consensus.Vote { + return &consensus.Vote{ ValidatorAddress: addr, ValidatorIndex: idx, Height: height, @@ -342,8 +343,8 @@ func newVote(addr types.Address, idx int32, height int64, round int32, } } -func newProposal(height int64, round int32, blockID types.BlockID) *types.Proposal { - return &types.Proposal{ +func newProposal(height int64, round int32, blockID metadata.BlockID) *consensus.Proposal { + return &consensus.Proposal{ Height: height, Round: round, BlockID: blockID, diff --git a/privval/grpc/client.go b/privval/grpc/client.go index 77f3930aa..17c223b1a 100644 --- 
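Taken together, the privval changes mean callers now assemble votes from pkg/consensus and pkg/metadata and sign their proto form. A minimal sketch under those assumptions, reusing the constructors and field values that appear in these tests (the chain ID and address are illustrative):

package main

import (
	"context"

	"github.com/tendermint/tendermint/crypto"
	"github.com/tendermint/tendermint/crypto/tmhash"
	tmrand "github.com/tendermint/tendermint/libs/rand"
	"github.com/tendermint/tendermint/pkg/consensus"
	"github.com/tendermint/tendermint/pkg/metadata"
	tmproto "github.com/tendermint/tendermint/proto/tendermint/types"
)

func main() {
	pv := consensus.NewMockPV() // mock PrivValidator from the relocated package

	// BlockID and PartSetHeader now live in pkg/metadata.
	blockID := metadata.BlockID{
		Hash:          tmrand.Bytes(tmhash.Size),
		PartSetHeader: metadata.PartSetHeader{Total: 5, Hash: tmrand.Bytes(tmhash.Size)},
	}

	vote := &consensus.Vote{
		Type:             tmproto.PrevoteType,
		Height:           10,
		Round:            1,
		BlockID:          blockID,
		ValidatorAddress: tmrand.Bytes(crypto.AddressSize), // illustrative address
		ValidatorIndex:   0,
	}

	// Sign the proto representation, as the updated tests do ("mychainid" is illustrative).
	v := vote.ToProto()
	if err := pv.SignVote(context.Background(), "mychainid", v); err != nil {
		panic(err)
	}
	vote.Signature = v.Signature
}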
a/privval/grpc/client.go +++ b/privval/grpc/client.go @@ -9,9 +9,9 @@ import ( "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/consensus" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // SignerClient implements PrivValidator. @@ -24,7 +24,7 @@ type SignerClient struct { chainID string } -var _ types.PrivValidator = (*SignerClient)(nil) +var _ consensus.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. // it will start the endpoint (if not already started) diff --git a/privval/grpc/client_test.go b/privval/grpc/client_test.go index 98730df19..59ec2a11f 100644 --- a/privval/grpc/client_test.go +++ b/privval/grpc/client_test.go @@ -15,15 +15,16 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmgrpc "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) const chainID = "chain-id" -func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { +func dialer(pv consensus.PrivValidator, logger log.Logger) (*grpc.Server, func(context.Context, string) (net.Conn, error)) { listener := bufconn.Listen(1024 * 1024) server := grpc.NewServer() @@ -46,7 +47,7 @@ func dialer(pv types.PrivValidator, logger log.Logger) (*grpc.Server, func(conte func TestSignerClient_GetPubKey(t *testing.T) { ctx := context.Background() - mockPV := types.NewMockPV() + mockPV := consensus.NewMockPV() logger := log.TestingLogger() srv, dialer := dialer(mockPV, logger) defer srv.Stop() @@ -68,7 +69,7 @@ func TestSignerClient_GetPubKey(t *testing.T) { func TestSignerClient_SignVote(t *testing.T) { ctx := context.Background() - mockPV := types.NewMockPV() + mockPV := consensus.NewMockPV() logger := log.TestingLogger() srv, dialer := dialer(mockPV, logger) defer srv.Stop() @@ -86,21 +87,21 @@ func TestSignerClient_SignVote(t *testing.T) { hash := tmrand.Bytes(tmhash.Size) valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ + want := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, } - have := &types.Vote{ + have := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -121,7 +122,7 @@ func TestSignerClient_SignVote(t *testing.T) { func TestSignerClient_SignProposal(t *testing.T) { ctx := context.Background() - mockPV := types.NewMockPV() + mockPV := consensus.NewMockPV() logger := log.TestingLogger() srv, dialer := 
dialer(mockPV, logger) defer srv.Stop() @@ -138,20 +139,20 @@ func TestSignerClient_SignProposal(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ + have := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, } - want := &types.Proposal{ + want := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, } diff --git a/privval/grpc/server.go b/privval/grpc/server.go index f5c434b1b..76bd0e07f 100644 --- a/privval/grpc/server.go +++ b/privval/grpc/server.go @@ -9,8 +9,8 @@ import ( "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/consensus" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" - "github.com/tendermint/tendermint/types" ) // SignerServer implements PrivValidatorAPIServer 9generated via protobuf services) @@ -18,11 +18,11 @@ import ( type SignerServer struct { logger log.Logger chainID string - privVal types.PrivValidator + privVal consensus.PrivValidator } func NewSignerServer(chainID string, - privVal types.PrivValidator, log log.Logger) *SignerServer { + privVal consensus.PrivValidator, log log.Logger) *SignerServer { return &SignerServer{ logger: log, diff --git a/privval/grpc/server_test.go b/privval/grpc/server_test.go index 9fec9f2fd..21858e411 100644 --- a/privval/grpc/server_test.go +++ b/privval/grpc/server_test.go @@ -12,10 +12,11 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmgrpc "github.com/tendermint/tendermint/privval/grpc" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) const ChainID = "123" @@ -24,11 +25,11 @@ func TestGetPubKey(t *testing.T) { testCases := []struct { name string - pv types.PrivValidator + pv consensus.PrivValidator err bool }{ - {name: "valid", pv: types.NewMockPV(), err: false}, - {name: "error on pubkey", pv: types.NewErroringMockPV(), err: true}, + {name: "valid", pv: consensus.NewMockPV(), err: false}, + {name: "error on pubkey", pv: consensus.NewErroringMockPV(), err: true}, } for _, tc := range testCases { @@ -58,42 +59,42 @@ func TestSignVote(t *testing.T) { testCases := []struct { name string - pv types.PrivValidator - have, want *types.Vote + pv consensus.PrivValidator + have, want *consensus.Vote err bool }{ - {name: "valid", pv: types.NewMockPV(), have: &types.Vote{ + {name: "valid", pv: consensus.NewMockPV(), have: &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, 
ValidatorIndex: 1, - }, want: &types.Vote{ + }, want: &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, }, err: false}, - {name: "invalid vote", pv: types.NewErroringMockPV(), have: &types.Vote{ + {name: "invalid vote", pv: consensus.NewErroringMockPV(), have: &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, Signature: []byte("signed"), - }, want: &types.Vote{ + }, want: &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -129,40 +130,40 @@ func TestSignProposal(t *testing.T) { testCases := []struct { name string - pv types.PrivValidator - have, want *types.Proposal + pv consensus.PrivValidator + have, want *consensus.Proposal err bool }{ - {name: "valid", pv: types.NewMockPV(), have: &types.Proposal{ + {name: "valid", pv: consensus.NewMockPV(), have: &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, - }, want: &types.Proposal{ + }, want: &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, }, err: false}, - {name: "invalid proposal", pv: types.NewErroringMockPV(), have: &types.Proposal{ + {name: "invalid proposal", pv: consensus.NewErroringMockPV(), have: &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, Signature: []byte("signed"), - }, want: &types.Proposal{ + }, want: &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, Signature: []byte("signed"), }, diff --git a/privval/msgs_test.go b/privval/msgs_test.go index bf532bd7b..cdeaf1f57 100644 --- a/privval/msgs_test.go +++ b/privval/msgs_test.go @@ -12,23 +12,24 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" cryptoproto 
"github.com/tendermint/tendermint/proto/tendermint/crypto" privproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) var stamp = time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC) -func exampleVote() *types.Vote { - return &types.Vote{ +func exampleVote() *consensus.Vote { + return &consensus.Vote{ Type: tmproto.SignedMsgType(1), Height: 3, Round: 2, Timestamp: stamp, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1000000, Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, @@ -38,18 +39,18 @@ func exampleVote() *types.Vote { } } -func exampleProposal() *types.Proposal { +func exampleProposal() *consensus.Proposal { - return &types.Proposal{ + return &consensus.Proposal{ Type: tmproto.SignedMsgType(1), Height: 3, Round: 2, Timestamp: stamp, POLRound: 2, Signature: []byte("it's a signature"), - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: tmhash.Sum([]byte("blockID_hash")), - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1000000, Hash: tmhash.Sum([]byte("blockID_part_set_header_hash")), }, diff --git a/privval/retry_signer_client.go b/privval/retry_signer_client.go index ccd9834e4..ebd34078b 100644 --- a/privval/retry_signer_client.go +++ b/privval/retry_signer_client.go @@ -6,8 +6,8 @@ import ( "time" "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/pkg/consensus" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // RetrySignerClient wraps SignerClient adding retry for each operation (except @@ -24,7 +24,7 @@ func NewRetrySignerClient(sc *SignerClient, retries int, timeout time.Duration) return &RetrySignerClient{sc, retries, timeout} } -var _ types.PrivValidator = (*RetrySignerClient)(nil) +var _ consensus.PrivValidator = (*RetrySignerClient)(nil) func (sc *RetrySignerClient) Close() error { return sc.next.Close() diff --git a/privval/signer_client.go b/privval/signer_client.go index d25584c8f..2b22be202 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -7,9 +7,9 @@ import ( "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/pkg/consensus" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // SignerClient implements PrivValidator. @@ -19,7 +19,7 @@ type SignerClient struct { chainID string } -var _ types.PrivValidator = (*SignerClient)(nil) +var _ consensus.PrivValidator = (*SignerClient)(nil) // NewSignerClient returns an instance of SignerClient. 
// it will start the endpoint (if not already started) diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index 9aa49e709..1356b0e6c 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -12,15 +12,16 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) type signerTestCase struct { chainID string - mockPV types.PrivValidator + mockPV consensus.PrivValidator signerClient *SignerClient signerServer *SignerServer } @@ -31,7 +32,7 @@ func getSignerTestCases(t *testing.T) []signerTestCase { // Get test cases for each possible dialer (DialTCP / DialUnix / etc) for _, dtc := range getDialerTestCases(t) { chainID := tmrand.Str(12) - mockPV := types.NewMockPV() + mockPV := consensus.NewMockPV() // get a pair of signer listener, signer dialer endpoints sl, sd := getMockEndpoints(t, dtc.addr, dtc.dialer) @@ -119,20 +120,20 @@ func TestSignerProposal(t *testing.T) { for _, tc := range getSignerTestCases(t) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) - have := &types.Proposal{ + have := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, } - want := &types.Proposal{ + want := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, } @@ -160,21 +161,21 @@ func TestSignerVote(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ + want := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, } - have := &types.Vote{ + have := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -204,21 +205,21 @@ func TestSignerVoteResetDeadline(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ + want := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, } - have := 
&types.Vote{ + have := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -258,21 +259,21 @@ func TestSignerVoteKeepAlive(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) valAddr := tmrand.Bytes(crypto.AddressSize) - want := &types.Vote{ + want := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, } - have := &types.Vote{ + have := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -309,8 +310,8 @@ func TestSignerVoteKeepAlive(t *testing.T) { func TestSignerSignProposalErrors(t *testing.T) { for _, tc := range getSignerTestCases(t) { // Replace service with a mock that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() + tc.signerServer.privVal = consensus.NewErroringMockPV() + tc.mockPV = consensus.NewErroringMockPV() tc := tc t.Cleanup(func() { @@ -326,18 +327,18 @@ func TestSignerSignProposalErrors(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) - proposal := &types.Proposal{ + proposal := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 1, Round: 2, POLRound: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, Signature: []byte("signature"), } err := tc.signerClient.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + require.Equal(t, err.(*RemoteSignerError).Description, consensus.ErroringMockPVErr.Error()) err = tc.mockPV.SignProposal(context.Background(), tc.chainID, proposal.ToProto()) require.Error(t, err) @@ -352,11 +353,11 @@ func TestSignerSignVoteErrors(t *testing.T) { ts := time.Now() hash := tmrand.Bytes(tmhash.Size) valAddr := tmrand.Bytes(crypto.AddressSize) - vote := &types.Vote{ + vote := &consensus.Vote{ Type: tmproto.PrecommitType, Height: 1, Round: 2, - BlockID: types.BlockID{Hash: hash, PartSetHeader: types.PartSetHeader{Hash: hash, Total: 2}}, + BlockID: metadata.BlockID{Hash: hash, PartSetHeader: metadata.PartSetHeader{Hash: hash, Total: 2}}, Timestamp: ts, ValidatorAddress: valAddr, ValidatorIndex: 1, @@ -364,8 +365,8 @@ func TestSignerSignVoteErrors(t *testing.T) { } // Replace signer service privval with one that always fails - tc.signerServer.privVal = types.NewErroringMockPV() - tc.mockPV = types.NewErroringMockPV() + tc.signerServer.privVal = consensus.NewErroringMockPV() + tc.mockPV = consensus.NewErroringMockPV() tc := tc t.Cleanup(func() { @@ -380,7 +381,7 @@ func TestSignerSignVoteErrors(t *testing.T) { }) err := tc.signerClient.SignVote(context.Background(), tc.chainID, 
vote.ToProto()) - require.Equal(t, err.(*RemoteSignerError).Description, types.ErroringMockPVErr.Error()) + require.Equal(t, err.(*RemoteSignerError).Description, consensus.ErroringMockPVErr.Error()) err = tc.mockPV.SignVote(context.Background(), tc.chainID, vote.ToProto()) require.Error(t, err) @@ -390,7 +391,7 @@ func TestSignerSignVoteErrors(t *testing.T) { } } -func brokenHandler(ctx context.Context, privVal types.PrivValidator, request privvalproto.Message, +func brokenHandler(ctx context.Context, privVal consensus.PrivValidator, request privvalproto.Message, chainID string) (privvalproto.Message, error) { var res privvalproto.Message var err error @@ -414,8 +415,8 @@ func brokenHandler(ctx context.Context, privVal types.PrivValidator, request pri func TestSignerUnexpectedResponse(t *testing.T) { for _, tc := range getSignerTestCases(t) { - tc.signerServer.privVal = types.NewMockPV() - tc.mockPV = types.NewMockPV() + tc.signerServer.privVal = consensus.NewMockPV() + tc.mockPV = consensus.NewMockPV() tc.signerServer.SetRequestHandler(brokenHandler) @@ -432,7 +433,7 @@ func TestSignerUnexpectedResponse(t *testing.T) { }) ts := time.Now() - want := &types.Vote{Timestamp: ts, Type: tmproto.PrecommitType} + want := &consensus.Vote{Timestamp: ts, Type: tmproto.PrecommitType} e := tc.signerClient.SignVote(context.Background(), tc.chainID, want.ToProto()) assert.EqualError(t, e, "empty response") diff --git a/privval/signer_listener_endpoint_test.go b/privval/signer_listener_endpoint_test.go index cbd45e6ce..014b59176 100644 --- a/privval/signer_listener_endpoint_test.go +++ b/privval/signer_listener_endpoint_test.go @@ -12,7 +12,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/consensus" ) var ( @@ -68,7 +68,7 @@ func TestSignerRemoteRetryTCPOnly(t *testing.T) { SignerDialerEndpointConnRetries(retries)(dialerEndpoint) chainID := tmrand.Str(12) - mockPV := types.NewMockPV() + mockPV := consensus.NewMockPV() signerServer := NewSignerServer(dialerEndpoint, chainID, mockPV) err = signerServer.Start() @@ -92,7 +92,7 @@ func TestRetryConnToRemoteSigner(t *testing.T) { var ( logger = log.TestingLogger() chainID = tmrand.Str(12) - mockPV = types.NewMockPV() + mockPV = consensus.NewMockPV() endpointIsOpenCh = make(chan struct{}) thisConnTimeout = testTimeoutReadWrite listenerEndpoint = newSignerListenerEndpoint(logger, tc.addr, thisConnTimeout) diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index 18ad8a996..d900be297 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -6,15 +6,15 @@ import ( "github.com/tendermint/tendermint/crypto" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/pkg/consensus" cryptoproto "github.com/tendermint/tendermint/proto/tendermint/crypto" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) func DefaultValidationRequestHandler( ctx context.Context, - privVal types.PrivValidator, + privVal consensus.PrivValidator, req privvalproto.Message, chainID string, ) (privvalproto.Message, error) { diff --git a/privval/signer_server.go b/privval/signer_server.go index 24bf67cc5..96d64c83f 100644 --- a/privval/signer_server.go +++ 
b/privval/signer_server.go @@ -6,14 +6,14 @@ import ( tmsync "github.com/tendermint/tendermint/internal/libs/sync" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/consensus" privvalproto "github.com/tendermint/tendermint/proto/tendermint/privval" - "github.com/tendermint/tendermint/types" ) // ValidationRequestHandlerFunc handles different remoteSigner requests type ValidationRequestHandlerFunc func( ctx context.Context, - privVal types.PrivValidator, + privVal consensus.PrivValidator, requestMessage privvalproto.Message, chainID string) (privvalproto.Message, error) @@ -22,13 +22,13 @@ type SignerServer struct { endpoint *SignerDialerEndpoint chainID string - privVal types.PrivValidator + privVal consensus.PrivValidator handlerMtx tmsync.Mutex validationRequestHandler ValidationRequestHandlerFunc } -func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal types.PrivValidator) *SignerServer { +func NewSignerServer(endpoint *SignerDialerEndpoint, chainID string, privVal consensus.PrivValidator) *SignerServer { ss := &SignerServer{ endpoint: endpoint, chainID: chainID, diff --git a/proto/tendermint/abci/types.proto b/proto/tendermint/abci/types.proto index 7126488d0..4dc9389e5 100644 --- a/proto/tendermint/abci/types.proto +++ b/proto/tendermint/abci/types.proto @@ -1,7 +1,7 @@ syntax = "proto3"; package tendermint.abci; -option go_package = "github.com/tendermint/tendermint/abci/types"; +option go_package = "github.com/tendermint/tendermint/pkg/abci"; // For more information on gogo.proto, see: // https://github.com/gogo/protobuf/blob/master/extensions.md diff --git a/proto/tendermint/blocksync/message_test.go b/proto/tendermint/blocksync/message_test.go index dd1aebbd0..b6460735e 100644 --- a/proto/tendermint/blocksync/message_test.go +++ b/proto/tendermint/blocksync/message_test.go @@ -8,8 +8,9 @@ import ( proto "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/mempool" bcproto "github.com/tendermint/tendermint/proto/tendermint/blocksync" - "github.com/tendermint/tendermint/types" ) func TestBlockRequest_Validate(t *testing.T) { @@ -86,7 +87,7 @@ func TestStatusResponse_Validate(t *testing.T) { // nolint:lll func TestBlockchainMessageVectors(t *testing.T) { - block := types.MakeBlock(int64(3), []types.Tx{types.Tx("Hello World")}, nil, nil) + block := block.MakeBlock(int64(3), []mempool.Tx{mempool.Tx("Hello World")}, nil, nil) block.Version.Block = 11 // overwrite updated protocol version bpb, err := block.ToProto() diff --git a/proto/tendermint/state/types.pb.go b/proto/tendermint/state/types.pb.go index 85f38cada..c7ac4c60a 100644 --- a/proto/tendermint/state/types.pb.go +++ b/proto/tendermint/state/types.pb.go @@ -9,7 +9,7 @@ import ( proto "github.com/gogo/protobuf/proto" _ "github.com/gogo/protobuf/types" github_com_gogo_protobuf_types "github.com/gogo/protobuf/types" - types "github.com/tendermint/tendermint/abci/types" + types "github.com/tendermint/tendermint/pkg/abci" types1 "github.com/tendermint/tendermint/proto/tendermint/types" version "github.com/tendermint/tendermint/proto/tendermint/version" io "io" diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 8eb90daf3..40f82d67f 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -4,7 +4,7 @@ import ( "context" abcicli "github.com/tendermint/tendermint/abci/client" - "github.com/tendermint/tendermint/abci/types" + 
"github.com/tendermint/tendermint/pkg/abci" ) //go:generate ../scripts/mockery_generate.sh AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot @@ -16,20 +16,20 @@ type AppConnConsensus interface { SetResponseCallback(abcicli.Callback) Error() error - InitChainSync(context.Context, types.RequestInitChain) (*types.ResponseInitChain, error) + InitChainSync(context.Context, abci.RequestInitChain) (*abci.ResponseInitChain, error) - BeginBlockSync(context.Context, types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(context.Context, types.RequestDeliverTx) (*abcicli.ReqRes, error) - EndBlockSync(context.Context, types.RequestEndBlock) (*types.ResponseEndBlock, error) - CommitSync(context.Context) (*types.ResponseCommit, error) + BeginBlockSync(context.Context, abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error) + DeliverTxAsync(context.Context, abci.RequestDeliverTx) (*abcicli.ReqRes, error) + EndBlockSync(context.Context, abci.RequestEndBlock) (*abci.ResponseEndBlock, error) + CommitSync(context.Context) (*abci.ResponseCommit, error) } type AppConnMempool interface { SetResponseCallback(abcicli.Callback) Error() error - CheckTxAsync(context.Context, types.RequestCheckTx) (*abcicli.ReqRes, error) - CheckTxSync(context.Context, types.RequestCheckTx) (*types.ResponseCheckTx, error) + CheckTxAsync(context.Context, abci.RequestCheckTx) (*abcicli.ReqRes, error) + CheckTxSync(context.Context, abci.RequestCheckTx) (*abci.ResponseCheckTx, error) FlushAsync(context.Context) (*abcicli.ReqRes, error) FlushSync(context.Context) error @@ -38,18 +38,18 @@ type AppConnMempool interface { type AppConnQuery interface { Error() error - EchoSync(context.Context, string) (*types.ResponseEcho, error) - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) - QuerySync(context.Context, types.RequestQuery) (*types.ResponseQuery, error) + EchoSync(context.Context, string) (*abci.ResponseEcho, error) + InfoSync(context.Context, abci.RequestInfo) (*abci.ResponseInfo, error) + QuerySync(context.Context, abci.RequestQuery) (*abci.ResponseQuery, error) } type AppConnSnapshot interface { Error() error - ListSnapshotsSync(context.Context, types.RequestListSnapshots) (*types.ResponseListSnapshots, error) - OfferSnapshotSync(context.Context, types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) - LoadSnapshotChunkSync(context.Context, types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) - ApplySnapshotChunkSync(context.Context, types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) + ListSnapshotsSync(context.Context, abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) + OfferSnapshotSync(context.Context, abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(context.Context, abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(context.Context, abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) } //----------------------------------------------------------------------------------------- @@ -75,30 +75,30 @@ func (app *appConnConsensus) Error() error { func (app *appConnConsensus) InitChainSync( ctx context.Context, - req types.RequestInitChain, -) (*types.ResponseInitChain, error) { + req abci.RequestInitChain, +) (*abci.ResponseInitChain, error) { return app.appConn.InitChainSync(ctx, req) } func (app *appConnConsensus) BeginBlockSync( ctx context.Context, - req types.RequestBeginBlock, -) 
(*types.ResponseBeginBlock, error) { + req abci.RequestBeginBlock, +) (*abci.ResponseBeginBlock, error) { return app.appConn.BeginBlockSync(ctx, req) } -func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (app *appConnConsensus) DeliverTxAsync(ctx context.Context, req abci.RequestDeliverTx) (*abcicli.ReqRes, error) { return app.appConn.DeliverTxAsync(ctx, req) } func (app *appConnConsensus) EndBlockSync( ctx context.Context, - req types.RequestEndBlock, -) (*types.ResponseEndBlock, error) { + req abci.RequestEndBlock, +) (*abci.ResponseEndBlock, error) { return app.appConn.EndBlockSync(ctx, req) } -func (app *appConnConsensus) CommitSync(ctx context.Context) (*types.ResponseCommit, error) { +func (app *appConnConsensus) CommitSync(ctx context.Context) (*abci.ResponseCommit, error) { return app.appConn.CommitSync(ctx) } @@ -131,11 +131,11 @@ func (app *appConnMempool) FlushSync(ctx context.Context) error { return app.appConn.FlushSync(ctx) } -func (app *appConnMempool) CheckTxAsync(ctx context.Context, req types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (app *appConnMempool) CheckTxAsync(ctx context.Context, req abci.RequestCheckTx) (*abcicli.ReqRes, error) { return app.appConn.CheckTxAsync(ctx, req) } -func (app *appConnMempool) CheckTxSync(ctx context.Context, req types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (app *appConnMempool) CheckTxSync(ctx context.Context, req abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { return app.appConn.CheckTxSync(ctx, req) } @@ -156,15 +156,15 @@ func (app *appConnQuery) Error() error { return app.appConn.Error() } -func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*types.ResponseEcho, error) { +func (app *appConnQuery) EchoSync(ctx context.Context, msg string) (*abci.ResponseEcho, error) { return app.appConn.EchoSync(ctx, msg) } -func (app *appConnQuery) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { +func (app *appConnQuery) InfoSync(ctx context.Context, req abci.RequestInfo) (*abci.ResponseInfo, error) { return app.appConn.InfoSync(ctx, req) } -func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery types.RequestQuery) (*types.ResponseQuery, error) { +func (app *appConnQuery) QuerySync(ctx context.Context, reqQuery abci.RequestQuery) (*abci.ResponseQuery, error) { return app.appConn.QuerySync(ctx, reqQuery) } @@ -187,26 +187,26 @@ func (app *appConnSnapshot) Error() error { func (app *appConnSnapshot) ListSnapshotsSync( ctx context.Context, - req types.RequestListSnapshots, -) (*types.ResponseListSnapshots, error) { + req abci.RequestListSnapshots, +) (*abci.ResponseListSnapshots, error) { return app.appConn.ListSnapshotsSync(ctx, req) } func (app *appConnSnapshot) OfferSnapshotSync( ctx context.Context, - req types.RequestOfferSnapshot, -) (*types.ResponseOfferSnapshot, error) { + req abci.RequestOfferSnapshot, +) (*abci.ResponseOfferSnapshot, error) { return app.appConn.OfferSnapshotSync(ctx, req) } func (app *appConnSnapshot) LoadSnapshotChunkSync( ctx context.Context, - req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + req abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { return app.appConn.LoadSnapshotChunkSync(ctx, req) } func (app *appConnSnapshot) ApplySnapshotChunkSync( ctx context.Context, - req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + req abci.RequestApplySnapshotChunk) 
(*abci.ResponseApplySnapshotChunk, error) { return app.appConn.ApplySnapshotChunkSync(ctx, req) } diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go index 458088635..b6b252a77 100644 --- a/proxy/app_conn_test.go +++ b/proxy/app_conn_test.go @@ -9,9 +9,9 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/server" - "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" ) //---------------------------------------- @@ -19,7 +19,7 @@ import ( type appConnTestI interface { EchoAsync(ctx context.Context, msg string) (*abcicli.ReqRes, error) FlushSync(context.Context) error - InfoSync(context.Context, types.RequestInfo) (*types.ResponseInfo, error) + InfoSync(context.Context, abci.RequestInfo) (*abci.ResponseInfo, error) } type appConnTest struct { @@ -38,7 +38,7 @@ func (app *appConnTest) FlushSync(ctx context.Context) error { return app.appConn.FlushSync(ctx) } -func (app *appConnTest) InfoSync(ctx context.Context, req types.RequestInfo) (*types.ResponseInfo, error) { +func (app *appConnTest) InfoSync(ctx context.Context, req abci.RequestInfo) (*abci.ResponseInfo, error) { return app.appConn.InfoSync(ctx, req) } @@ -143,7 +143,7 @@ func BenchmarkEcho(b *testing.B) { } b.StopTimer() - // info := proxy.InfoSync(types.RequestInfo{""}) + // info := proxy.InfoSync(abci.RequestInfo{""}) // b.Log("N: ", b.N, info) } diff --git a/proxy/client.go b/proxy/client.go index 929933e01..6ceabb13e 100644 --- a/proxy/client.go +++ b/proxy/client.go @@ -6,8 +6,8 @@ import ( abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/abci/types" tmsync "github.com/tendermint/tendermint/internal/libs/sync" + "github.com/tendermint/tendermint/pkg/abci" ) //go:generate ../scripts/mockery_generate.sh ClientCreator @@ -23,12 +23,12 @@ type ClientCreator interface { type localClientCreator struct { mtx *tmsync.RWMutex - app types.Application + app abci.Application } // NewLocalClientCreator returns a ClientCreator for the given app, // which will be running locally. 
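Downstream of the proto go_package change, the proxy connections now exchange abci.Request*/abci.Response* values from pkg/abci, and NewLocalClientCreator accepts an abci.Application. A minimal sketch of both sides of that surface; the helper names and the version string are illustrative:

package proxyexample

import (
	"context"

	"github.com/tendermint/tendermint/pkg/abci"
	"github.com/tendermint/tendermint/proxy"
)

// newNoopCreator mirrors the "noop" branch of DefaultClientCreator: a local,
// in-process client creator around the no-op base application.
func newNoopCreator() proxy.ClientCreator {
	return proxy.NewLocalClientCreator(abci.NewBaseApplication())
}

// appVersion issues an Info query over an established query connection.
// RequestInfo/ResponseInfo come from pkg/abci; the version string is illustrative.
func appVersion(ctx context.Context, q proxy.AppConnQuery) (string, error) {
	res, err := q.InfoSync(ctx, abci.RequestInfo{Version: "0.35.0"})
	if err != nil {
		return "", err
	}
	return res.Version, nil
}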
-func NewLocalClientCreator(app types.Application) ClientCreator { +func NewLocalClientCreator(app abci.Application) ClientCreator { return &localClientCreator{ mtx: new(tmsync.RWMutex), app: app, @@ -82,7 +82,7 @@ func DefaultClientCreator(addr, transport, dbDir string) (ClientCreator, io.Clos app := kvstore.NewPersistentKVStoreApplication(dbDir) return NewLocalClientCreator(app), app case "noop": - return NewLocalClientCreator(types.NewBaseApplication()), noopCloser{} + return NewLocalClientCreator(abci.NewBaseApplication()), noopCloser{} default: mustConnect := false // loop retrying return NewRemoteClientCreator(addr, transport, mustConnect), noopCloser{} diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go index 03207706e..f1dcff2cd 100644 --- a/proxy/mocks/app_conn_consensus.go +++ b/proxy/mocks/app_conn_consensus.go @@ -3,13 +3,12 @@ package mocks import ( + abcicli "github.com/tendermint/tendermint/abci/client" + abci "github.com/tendermint/tendermint/pkg/abci" + context "context" - abcicli "github.com/tendermint/tendermint/abci/client" - mock "github.com/stretchr/testify/mock" - - types "github.com/tendermint/tendermint/abci/types" ) // AppConnConsensus is an autogenerated mock type for the AppConnConsensus type @@ -18,20 +17,20 @@ type AppConnConsensus struct { } // BeginBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { +func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 abci.RequestBeginBlock) (*abci.ResponseBeginBlock, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseBeginBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + var r0 *abci.ResponseBeginBlock + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestBeginBlock) *abci.ResponseBeginBlock); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseBeginBlock) + r0 = ret.Get(0).(*abci.ResponseBeginBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestBeginBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestBeginBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -41,15 +40,15 @@ func (_m *AppConnConsensus) BeginBlockSync(_a0 context.Context, _a1 types.Reques } // CommitSync provides a mock function with given fields: _a0 -func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseCommit, error) { +func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*abci.ResponseCommit, error) { ret := _m.Called(_a0) - var r0 *types.ResponseCommit - if rf, ok := ret.Get(0).(func(context.Context) *types.ResponseCommit); ok { + var r0 *abci.ResponseCommit + if rf, ok := ret.Get(0).(func(context.Context) *abci.ResponseCommit); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCommit) + r0 = ret.Get(0).(*abci.ResponseCommit) } } @@ -64,11 +63,11 @@ func (_m *AppConnConsensus) CommitSync(_a0 context.Context) (*types.ResponseComm } // DeliverTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.RequestDeliverTx) (*abcicli.ReqRes, error) { +func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 abci.RequestDeliverTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := 
ret.Get(0).(func(context.Context, types.RequestDeliverTx) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestDeliverTx) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -77,7 +76,7 @@ func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.Reques } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestDeliverTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestDeliverTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -87,20 +86,20 @@ func (_m *AppConnConsensus) DeliverTxAsync(_a0 context.Context, _a1 types.Reques } // EndBlockSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 types.RequestEndBlock) (*types.ResponseEndBlock, error) { +func (_m *AppConnConsensus) EndBlockSync(_a0 context.Context, _a1 abci.RequestEndBlock) (*abci.ResponseEndBlock, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseEndBlock - if rf, ok := ret.Get(0).(func(context.Context, types.RequestEndBlock) *types.ResponseEndBlock); ok { + var r0 *abci.ResponseEndBlock + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestEndBlock) *abci.ResponseEndBlock); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEndBlock) + r0 = ret.Get(0).(*abci.ResponseEndBlock) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestEndBlock) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestEndBlock) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -124,20 +123,20 @@ func (_m *AppConnConsensus) Error() error { } // InitChainSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 types.RequestInitChain) (*types.ResponseInitChain, error) { +func (_m *AppConnConsensus) InitChainSync(_a0 context.Context, _a1 abci.RequestInitChain) (*abci.ResponseInitChain, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseInitChain - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInitChain) *types.ResponseInitChain); ok { + var r0 *abci.ResponseInitChain + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestInitChain) *abci.ResponseInitChain); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInitChain) + r0 = ret.Get(0).(*abci.ResponseInitChain) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInitChain) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInitChain) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go index 2505160d6..56b535394 100644 --- a/proxy/mocks/app_conn_mempool.go +++ b/proxy/mocks/app_conn_mempool.go @@ -3,13 +3,12 @@ package mocks import ( + abcicli "github.com/tendermint/tendermint/abci/client" + abci "github.com/tendermint/tendermint/pkg/abci" + context "context" - abcicli "github.com/tendermint/tendermint/abci/client" - mock "github.com/stretchr/testify/mock" - - types "github.com/tendermint/tendermint/abci/types" ) // AppConnMempool is an autogenerated mock type for the AppConnMempool type @@ -18,11 +17,11 @@ type AppConnMempool struct { } // CheckTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestCheckTx) (*abcicli.ReqRes, error) { +func (_m 
*AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abcicli.ReqRes, error) { ret := _m.Called(_a0, _a1) var r0 *abcicli.ReqRes - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *abcicli.ReqRes); ok { + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abcicli.ReqRes); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -31,7 +30,7 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestChe } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -41,20 +40,20 @@ func (_m *AppConnMempool) CheckTxAsync(_a0 context.Context, _a1 types.RequestChe } // CheckTxSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnMempool) CheckTxSync(_a0 context.Context, _a1 types.RequestCheckTx) (*types.ResponseCheckTx, error) { +func (_m *AppConnMempool) CheckTxSync(_a0 context.Context, _a1 abci.RequestCheckTx) (*abci.ResponseCheckTx, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.RequestCheckTx) *types.ResponseCheckTx); ok { + var r0 *abci.ResponseCheckTx + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestCheckTx) *abci.ResponseCheckTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseCheckTx) + r0 = ret.Get(0).(*abci.ResponseCheckTx) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestCheckTx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestCheckTx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go index 47ac5bef9..a03f34011 100644 --- a/proxy/mocks/app_conn_query.go +++ b/proxy/mocks/app_conn_query.go @@ -5,9 +5,9 @@ package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + abci "github.com/tendermint/tendermint/pkg/abci" - types "github.com/tendermint/tendermint/abci/types" + mock "github.com/stretchr/testify/mock" ) // AppConnQuery is an autogenerated mock type for the AppConnQuery type @@ -16,15 +16,15 @@ type AppConnQuery struct { } // EchoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) EchoSync(_a0 context.Context, _a1 string) (*types.ResponseEcho, error) { +func (_m *AppConnQuery) EchoSync(_a0 context.Context, _a1 string) (*abci.ResponseEcho, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseEcho - if rf, ok := ret.Get(0).(func(context.Context, string) *types.ResponseEcho); ok { + var r0 *abci.ResponseEcho + if rf, ok := ret.Get(0).(func(context.Context, string) *abci.ResponseEcho); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseEcho) + r0 = ret.Get(0).(*abci.ResponseEcho) } } @@ -53,20 +53,20 @@ func (_m *AppConnQuery) Error() error { } // InfoSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*types.ResponseInfo, error) { +func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 abci.RequestInfo) (*abci.ResponseInfo, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseInfo - if rf, ok := ret.Get(0).(func(context.Context, types.RequestInfo) *types.ResponseInfo); ok { + var r0 *abci.ResponseInfo + if rf, ok := ret.Get(0).(func(context.Context, 
abci.RequestInfo) *abci.ResponseInfo); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseInfo) + r0 = ret.Get(0).(*abci.ResponseInfo) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestInfo) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestInfo) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -76,20 +76,20 @@ func (_m *AppConnQuery) InfoSync(_a0 context.Context, _a1 types.RequestInfo) (*t } // QuerySync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 types.RequestQuery) (*types.ResponseQuery, error) { +func (_m *AppConnQuery) QuerySync(_a0 context.Context, _a1 abci.RequestQuery) (*abci.ResponseQuery, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseQuery - if rf, ok := ret.Get(0).(func(context.Context, types.RequestQuery) *types.ResponseQuery); ok { + var r0 *abci.ResponseQuery + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestQuery) *abci.ResponseQuery); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseQuery) + r0 = ret.Get(0).(*abci.ResponseQuery) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestQuery) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestQuery) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go index 0b6f10ce1..f7acc0a06 100644 --- a/proxy/mocks/app_conn_snapshot.go +++ b/proxy/mocks/app_conn_snapshot.go @@ -5,9 +5,9 @@ package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + abci "github.com/tendermint/tendermint/pkg/abci" - types "github.com/tendermint/tendermint/abci/types" + mock "github.com/stretchr/testify/mock" ) // AppConnSnapshot is an autogenerated mock type for the AppConnSnapshot type @@ -16,20 +16,20 @@ type AppConnSnapshot struct { } // ApplySnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 context.Context, _a1 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { +func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 context.Context, _a1 abci.RequestApplySnapshotChunk) (*abci.ResponseApplySnapshotChunk, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseApplySnapshotChunk - if rf, ok := ret.Get(0).(func(context.Context, types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + var r0 *abci.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestApplySnapshotChunk) *abci.ResponseApplySnapshotChunk); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + r0 = ret.Get(0).(*abci.ResponseApplySnapshotChunk) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestApplySnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestApplySnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -53,20 +53,20 @@ func (_m *AppConnSnapshot) Error() error { } // ListSnapshotsSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { +func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 abci.RequestListSnapshots) (*abci.ResponseListSnapshots, error) 
{ ret := _m.Called(_a0, _a1) - var r0 *types.ResponseListSnapshots - if rf, ok := ret.Get(0).(func(context.Context, types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + var r0 *abci.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestListSnapshots) *abci.ResponseListSnapshots); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseListSnapshots) + r0 = ret.Get(0).(*abci.ResponseListSnapshots) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestListSnapshots) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestListSnapshots) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -76,20 +76,20 @@ func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 context.Context, _a1 types.Requ } // LoadSnapshotChunkSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { +func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 abci.RequestLoadSnapshotChunk) (*abci.ResponseLoadSnapshotChunk, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseLoadSnapshotChunk - if rf, ok := ret.Get(0).(func(context.Context, types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + var r0 *abci.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestLoadSnapshotChunk) *abci.ResponseLoadSnapshotChunk); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + r0 = ret.Get(0).(*abci.ResponseLoadSnapshotChunk) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestLoadSnapshotChunk) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestLoadSnapshotChunk) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -99,20 +99,20 @@ func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 context.Context, _a1 types. 
} // OfferSnapshotSync provides a mock function with given fields: _a0, _a1 -func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { +func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 context.Context, _a1 abci.RequestOfferSnapshot) (*abci.ResponseOfferSnapshot, error) { ret := _m.Called(_a0, _a1) - var r0 *types.ResponseOfferSnapshot - if rf, ok := ret.Get(0).(func(context.Context, types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + var r0 *abci.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(context.Context, abci.RequestOfferSnapshot) *abci.ResponseOfferSnapshot); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + r0 = ret.Get(0).(*abci.ResponseOfferSnapshot) } } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.RequestOfferSnapshot) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, abci.RequestOfferSnapshot) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/proxy/version.go b/proxy/version.go index 6d70c3a72..2a7f8109e 100644 --- a/proxy/version.go +++ b/proxy/version.go @@ -1,7 +1,7 @@ package proxy import ( - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/version" ) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 4acd0fee9..a347a4022 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -10,11 +10,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" ) var waitForEventTimeout = 8 * time.Second diff --git a/rpc/client/evidence_test.go b/rpc/client/evidence_test.go index 0ff158e56..79e75f47a 100644 --- a/rpc/client/evidence_test.go +++ b/rpc/client/evidence_test.go @@ -9,15 +9,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + types "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/types" ) // For some reason the empty node used in tests has a time of @@ -27,7 +29,7 @@ import ( var defaultTestTime = time.Date(2018, 10, 10, 8, 20, 13, 695936996, time.UTC) func newEvidence(t *testing.T, val *privval.FilePV, - vote *types.Vote, vote2 *types.Vote, + vote *consensus.Vote, vote2 *consensus.Vote, chainID string) *types.DuplicateVoteEvidence { var err error @@ -35,14 +37,14 @@ func newEvidence(t *testing.T, val *privval.FilePV, v := vote.ToProto() v2 := vote2.ToProto() - vote.Signature, err = val.Key.PrivKey.Sign(types.VoteSignBytes(chainID, v)) + 
vote.Signature, err = val.Key.PrivKey.Sign(consensus.VoteSignBytes(chainID, v)) require.NoError(t, err) - vote2.Signature, err = val.Key.PrivKey.Sign(types.VoteSignBytes(chainID, v2)) + vote2.Signature, err = val.Key.PrivKey.Sign(consensus.VoteSignBytes(chainID, v2)) require.NoError(t, err) - validator := types.NewValidator(val.Key.PubKey, 10) - valSet := types.NewValidatorSet([]*types.Validator{validator}) + validator := consensus.NewValidator(val.Key.PubKey, 10) + valSet := consensus.NewValidatorSet([]*consensus.Validator{validator}) return types.NewDuplicateVoteEvidence(vote, vote2, defaultTestTime, valSet) } @@ -52,16 +54,16 @@ func makeEvidences( val *privval.FilePV, chainID string, ) (correct *types.DuplicateVoteEvidence, fakes []*types.DuplicateVoteEvidence) { - vote := types.Vote{ + vote := consensus.Vote{ ValidatorAddress: val.Key.Address, ValidatorIndex: 0, Height: 1, Round: 0, Type: tmproto.PrevoteType, Timestamp: defaultTestTime, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: tmhash.Sum(tmrand.Bytes(tmhash.Size)), - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 1000, Hash: tmhash.Sum([]byte("partset")), }, diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 49598e814..3bd98cbc6 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/events" ) // Waiter is informed of current height, decided whether to quit early @@ -57,13 +57,13 @@ func WaitForHeight(c StatusClient, h int64, waiter Waiter) error { // when the timeout duration has expired. // // This handles subscribing and unsubscribing under the hood -func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) (types.TMEventData, error) { +func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) (events.TMEventData, error) { const subscriber = "helpers" ctx, cancel := context.WithTimeout(context.Background(), timeout) defer cancel() // register for the next event of this type - eventCh, err := c.Subscribe(ctx, subscriber, types.QueryForEvent(eventValue).String()) + eventCh, err := c.Subscribe(ctx, subscriber, events.QueryForEvent(eventValue).String()) if err != nil { return nil, fmt.Errorf("failed to subscribe: %w", err) } @@ -77,7 +77,7 @@ func WaitForOneEvent(c EventsClient, eventValue string, timeout time.Duration) ( select { case event := <-eventCh: - return event.Data.(types.TMEventData), nil + return event.Data.(events.TMEventData), nil case <-ctx.Done(): return nil, errors.New("timed out waiting for event") } diff --git a/rpc/client/http/http.go b/rpc/client/http/http.go index 54c56f99f..55fd994d5 100644 --- a/rpc/client/http/http.go +++ b/rpc/client/http/http.go @@ -7,10 +7,11 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" jsonrpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" - "github.com/tendermint/tendermint/types" ) /* @@ -250,7 +251,7 @@ func (c *baseRPCClient) ABCIQueryWithOptions( func (c *baseRPCClient) BroadcastTxCommit( ctx context.Context, - tx types.Tx, + tx mempool.Tx, ) (*ctypes.ResultBroadcastTxCommit, error) { result := new(ctypes.ResultBroadcastTxCommit) _, err := c.caller.Call(ctx, 
"broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) @@ -262,14 +263,14 @@ func (c *baseRPCClient) BroadcastTxCommit( func (c *baseRPCClient) BroadcastTxAsync( ctx context.Context, - tx types.Tx, + tx mempool.Tx, ) (*ctypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_async", tx) } func (c *baseRPCClient) BroadcastTxSync( ctx context.Context, - tx types.Tx, + tx mempool.Tx, ) (*ctypes.ResultBroadcastTx, error) { return c.broadcastTX(ctx, "broadcast_tx_sync", tx) } @@ -277,7 +278,7 @@ func (c *baseRPCClient) BroadcastTxSync( func (c *baseRPCClient) broadcastTX( ctx context.Context, route string, - tx types.Tx, + tx mempool.Tx, ) (*ctypes.ResultBroadcastTx, error) { result := new(ctypes.ResultBroadcastTx) _, err := c.caller.Call(ctx, route, map[string]interface{}{"tx": tx}, result) @@ -312,7 +313,7 @@ func (c *baseRPCClient) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUn return result, nil } -func (c *baseRPCClient) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *baseRPCClient) CheckTx(ctx context.Context, tx mempool.Tx) (*ctypes.ResultCheckTx, error) { result := new(ctypes.ResultCheckTx) _, err := c.caller.Call(ctx, "check_tx", map[string]interface{}{"tx": tx}, result) if err != nil { @@ -558,7 +559,7 @@ func (c *baseRPCClient) Validators( func (c *baseRPCClient) BroadcastEvidence( ctx context.Context, - ev types.Evidence, + ev evidence.Evidence, ) (*ctypes.ResultBroadcastEvidence, error) { result := new(ctypes.ResultBroadcastEvidence) _, err := c.caller.Call(ctx, "broadcast_evidence", map[string]interface{}{"evidence": ev}, result) diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 3547b42ae..7f9f62179 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -25,8 +25,9 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" ) //go:generate ../../scripts/mockery_generate.sh Client @@ -58,9 +59,9 @@ type ABCIClient interface { opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) // Writing to abci app - BroadcastTxCommit(context.Context, types.Tx) (*ctypes.ResultBroadcastTxCommit, error) - BroadcastTxAsync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) - BroadcastTxSync(context.Context, types.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxCommit(context.Context, mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) + BroadcastTxAsync(context.Context, mempool.Tx) (*ctypes.ResultBroadcastTx, error) + BroadcastTxSync(context.Context, mempool.Tx) (*ctypes.ResultBroadcastTx, error) } // SignClient groups together the functionality needed to get valid signatures @@ -136,13 +137,13 @@ type EventsClient interface { type MempoolClient interface { UnconfirmedTxs(ctx context.Context, limit *int) (*ctypes.ResultUnconfirmedTxs, error) NumUnconfirmedTxs(context.Context) (*ctypes.ResultUnconfirmedTxs, error) - CheckTx(context.Context, types.Tx) (*ctypes.ResultCheckTx, error) + CheckTx(context.Context, mempool.Tx) (*ctypes.ResultCheckTx, error) } // EvidenceClient is used for submitting an evidence of the malicious // behavior. 
type EvidenceClient interface { - BroadcastEvidence(context.Context, types.Evidence) (*ctypes.ResultBroadcastEvidence, error) + BroadcastEvidence(context.Context, evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) } // RemoteClient is a Client, which can also return the remote network address. diff --git a/rpc/client/local/local.go b/rpc/client/local/local.go index 0663ebf67..ac66cc037 100644 --- a/rpc/client/local/local.go +++ b/rpc/client/local/local.go @@ -9,11 +9,13 @@ import ( "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" rpcclient "github.com/tendermint/tendermint/rpc/client" rpccore "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) /* @@ -37,7 +39,7 @@ don't need to do anything). It will keep trying indefinitely with exponential backoff (10ms -> 20ms -> 40ms) until successful. */ type Local struct { - *types.EventBus + *events.EventBus Logger log.Logger ctx *rpctypes.Context env *rpccore.Environment @@ -47,7 +49,7 @@ type Local struct { // local RPC client constructor needs to build a local client. type NodeService interface { ConfigureRPC() (*rpccore.Environment, error) - EventBus() *types.EventBus + EventBus() *events.EventBus } // New configures a client that calls the Node directly. @@ -91,15 +93,15 @@ func (c *Local) ABCIQueryWithOptions( return c.env.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } -func (c *Local) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c *Local) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(c.ctx, tx) } -func (c *Local) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(c.ctx, tx) } -func (c *Local) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c *Local) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(c.ctx, tx) } @@ -111,7 +113,7 @@ func (c *Local) NumUnconfirmedTxs(ctx context.Context) (*ctypes.ResultUnconfirme return c.env.NumUnconfirmedTxs(c.ctx) } -func (c *Local) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c *Local) CheckTx(ctx context.Context, tx mempool.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(c.ctx, tx) } @@ -205,7 +207,7 @@ func (c *Local) BlockSearch( return c.env.BlockSearch(c.ctx, query, page, perPage, orderBy) } -func (c *Local) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c *Local) BroadcastEvidence(ctx context.Context, ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(c.ctx, ev) } @@ -224,7 +226,7 @@ func (c *Local) Subscribe( outCap = outCapacity[0] } - var sub types.Subscription + var sub events.Subscription if outCap > 0 { sub, err = c.EventBus.Subscribe(ctx, subscriber, q, outCap) } else { @@ -241,7 +243,7 @@ func (c 
*Local) Subscribe( } func (c *Local) eventsRoutine( - sub types.Subscription, + sub events.Subscription, subscriber string, q tmpubsub.Query, outc chan<- ctypes.ResultEvent) { @@ -281,7 +283,7 @@ func (c *Local) eventsRoutine( } // Try to resubscribe with exponential backoff. -func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) types.Subscription { +func (c *Local) resubscribe(subscriber string, q tmpubsub.Query) events.Subscription { attempts := 0 for { if !c.IsRunning() { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 0737deec0..66bfac638 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -3,12 +3,12 @@ package mock import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" ) // ABCIApp will send all abci related request to the named app, @@ -49,7 +49,7 @@ func (a ABCIApp) ABCIQueryWithOptions( // NOTE: Caller should call a.App.Commit() separately, // this function does not actually wait for a commit. // TODO: Make it wait for a commit and set res.Height appropriately. -func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) if res.CheckTx.IsErr() { @@ -60,7 +60,7 @@ func (a ABCIApp) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.Re return &res, nil } -func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... if !c.IsErr() { @@ -75,7 +75,7 @@ func (a ABCIApp) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Res }, nil } -func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (a ABCIApp) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { c := a.App.CheckTx(abci.RequestCheckTx{Tx: tx}) // and this gets written in a background thread... 
if !c.IsErr() { @@ -125,7 +125,7 @@ func (m ABCIMock) ABCIQueryWithOptions( return &ctypes.ResultABCIQuery{Response: resQuery}, nil } -func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res, err := m.BroadcastCommit.GetResponse(tx) if err != nil { return nil, err @@ -133,7 +133,7 @@ func (m ABCIMock) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.R return res.(*ctypes.ResultBroadcastTxCommit), nil } -func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -141,7 +141,7 @@ func (m ABCIMock) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.Re return res.(*ctypes.ResultBroadcastTx), nil } -func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (m ABCIMock) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := m.Broadcast.GetResponse(tx) if err != nil { return nil, err @@ -207,7 +207,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions( return res, err } -func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res, err := r.Client.BroadcastTxCommit(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_commit", @@ -218,7 +218,7 @@ func (r *ABCIRecorder) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*cty return res, err } -func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxAsync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_async", @@ -229,7 +229,7 @@ func (r *ABCIRecorder) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctyp return res, err } -func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (r *ABCIRecorder) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { res, err := r.Client.BroadcastTxSync(ctx, tx) r.addCall(Call{ Name: "broadcast_tx_sync", diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index d164b275a..6c6fad566 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -10,12 +10,12 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" ) func TestABCIMock(t *testing.T) { @@ -23,8 +23,8 @@ func TestABCIMock(t *testing.T) { key, value := []byte("foo"), []byte("bar") height := int64(10) - goodTx := types.Tx{0x01, 0xff} - badTx := types.Tx{0x12, 0x21} + goodTx := mempool.Tx{0x01, 0xff} + badTx := 
mempool.Tx{0x12, 0x21} m := mock.ABCIMock{ Info: mock.Call{Error: errors.New("foobar")}, @@ -130,7 +130,7 @@ func TestABCIRecorder(t *testing.T) { assert.False(qa.Prove) // now add some broadcasts (should all err) - txs := []types.Tx{{1}, {2}, {3}} + txs := []mempool.Tx{{1}, {2}, {3}} _, err = r.BroadcastTxCommit(context.Background(), txs[0]) assert.NotNil(err, "expected err on broadcast") _, err = r.BroadcastTxSync(context.Background(), txs[1]) @@ -172,7 +172,7 @@ func TestABCIApp(t *testing.T) { // add a key key, value := "foo", "bar" tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(context.Background(), types.Tx(tx)) + res, err := m.BroadcastTxCommit(context.Background(), mempool.Tx(tx)) require.Nil(err) assert.True(res.CheckTx.IsOK()) require.NotNil(res.DeliverTx) diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 57e96fb09..48d56d867 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -20,11 +20,12 @@ import ( "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Client wraps arbitrary implementations of the various interfaces. @@ -104,19 +105,19 @@ func (c Client) ABCIQueryWithOptions( return c.env.ABCIQuery(&rpctypes.Context{}, path, data, opts.Height, opts.Prove) } -func (c Client) BroadcastTxCommit(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { +func (c Client) BroadcastTxCommit(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTxCommit, error) { return c.env.BroadcastTxCommit(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxAsync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxAsync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxAsync(&rpctypes.Context{}, tx) } -func (c Client) BroadcastTxSync(ctx context.Context, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { +func (c Client) BroadcastTxSync(ctx context.Context, tx mempool.Tx) (*ctypes.ResultBroadcastTx, error) { return c.env.BroadcastTxSync(&rpctypes.Context{}, tx) } -func (c Client) CheckTx(ctx context.Context, tx types.Tx) (*ctypes.ResultCheckTx, error) { +func (c Client) CheckTx(ctx context.Context, tx mempool.Tx) (*ctypes.ResultCheckTx, error) { return c.env.CheckTx(&rpctypes.Context{}, tx) } @@ -178,6 +179,6 @@ func (c Client) Validators(ctx context.Context, height *int64, page, perPage *in return c.env.Validators(&rpctypes.Context{}, height, page, perPage) } -func (c Client) BroadcastEvidence(ctx context.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { +func (c Client) BroadcastEvidence(ctx context.Context, ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) { return c.env.BroadcastEvidence(&rpctypes.Context{}, ev) } diff --git a/rpc/client/mocks/client.go b/rpc/client/mocks/client.go index ef374b9a8..fd7873056 100644 --- a/rpc/client/mocks/client.go +++ b/rpc/client/mocks/client.go @@ -10,11 +10,13 @@ import ( coretypes "github.com/tendermint/tendermint/rpc/core/types" + evidence "github.com/tendermint/tendermint/pkg/evidence" + log "github.com/tendermint/tendermint/libs/log" - mock 
"github.com/stretchr/testify/mock" + mempool "github.com/tendermint/tendermint/pkg/mempool" - types "github.com/tendermint/tendermint/types" + mock "github.com/stretchr/testify/mock" ) // Client is an autogenerated mock type for the Client type @@ -207,11 +209,11 @@ func (_m *Client) BlockchainInfo(ctx context.Context, minHeight int64, maxHeight } // BroadcastEvidence provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*coretypes.ResultBroadcastEvidence, error) { +func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 evidence.Evidence) (*coretypes.ResultBroadcastEvidence, error) { ret := _m.Called(_a0, _a1) var r0 *coretypes.ResultBroadcastEvidence - if rf, ok := ret.Get(0).(func(context.Context, types.Evidence) *coretypes.ResultBroadcastEvidence); ok { + if rf, ok := ret.Get(0).(func(context.Context, evidence.Evidence) *coretypes.ResultBroadcastEvidence); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -220,7 +222,7 @@ func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*c } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Evidence) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, evidence.Evidence) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -230,11 +232,11 @@ func (_m *Client) BroadcastEvidence(_a0 context.Context, _a1 types.Evidence) (*c } // BroadcastTxAsync provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 mempool.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) var r0 *coretypes.ResultBroadcastTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + if rf, ok := ret.Get(0).(func(context.Context, mempool.Tx) *coretypes.ResultBroadcastTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -243,7 +245,7 @@ func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretype } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, mempool.Tx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -253,11 +255,11 @@ func (_m *Client) BroadcastTxAsync(_a0 context.Context, _a1 types.Tx) (*coretype } // BroadcastTxCommit provides a mock function with given fields: _a0, _a1 -func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTxCommit, error) { +func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 mempool.Tx) (*coretypes.ResultBroadcastTxCommit, error) { ret := _m.Called(_a0, _a1) var r0 *coretypes.ResultBroadcastTxCommit - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTxCommit); ok { + if rf, ok := ret.Get(0).(func(context.Context, mempool.Tx) *coretypes.ResultBroadcastTxCommit); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -266,7 +268,7 @@ func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretyp } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, mempool.Tx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -276,11 +278,11 @@ func (_m *Client) BroadcastTxCommit(_a0 context.Context, _a1 types.Tx) (*coretyp } // BroadcastTxSync provides a mock function with given 
fields: _a0, _a1 -func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultBroadcastTx, error) { +func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 mempool.Tx) (*coretypes.ResultBroadcastTx, error) { ret := _m.Called(_a0, _a1) var r0 *coretypes.ResultBroadcastTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultBroadcastTx); ok { + if rf, ok := ret.Get(0).(func(context.Context, mempool.Tx) *coretypes.ResultBroadcastTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -289,7 +291,7 @@ func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, mempool.Tx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -299,11 +301,11 @@ func (_m *Client) BroadcastTxSync(_a0 context.Context, _a1 types.Tx) (*coretypes } // CheckTx provides a mock function with given fields: _a0, _a1 -func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultCheckTx, error) { +func (_m *Client) CheckTx(_a0 context.Context, _a1 mempool.Tx) (*coretypes.ResultCheckTx, error) { ret := _m.Called(_a0, _a1) var r0 *coretypes.ResultCheckTx - if rf, ok := ret.Get(0).(func(context.Context, types.Tx) *coretypes.ResultCheckTx); ok { + if rf, ok := ret.Get(0).(func(context.Context, mempool.Tx) *coretypes.ResultCheckTx); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -312,7 +314,7 @@ func (_m *Client) CheckTx(_a0 context.Context, _a1 types.Tx) (*coretypes.ResultC } var r1 error - if rf, ok := ret.Get(1).(func(context.Context, types.Tx) error); ok { + if rf, ok := ret.Get(1).(func(context.Context, mempool.Tx) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index f8962fb35..b28a652d2 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -14,19 +14,20 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/config" mempl "github.com/tendermint/tendermint/internal/mempool" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/service" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/rpc/client" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpclocal "github.com/tendermint/tendermint/rpc/client/local" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" - "github.com/tendermint/tendermint/types" ) func getHTTPClient(t *testing.T, conf *config.Config) *rpchttp.HTTP { @@ -253,7 +254,7 @@ func TestGenesisChunked(t *testing.T) { } doc := []byte(strings.Join(decoded, "")) - var out types.GenesisDoc + var out consensus.GenesisDoc require.NoError(t, tmjson.Unmarshal(doc, &out), "first: %+v, doc: %s", first, string(doc)) } @@ -477,8 +478,8 @@ func TestUnconfirmedTxs(t *testing.T) { ch := make(chan *abci.Response, 1) n, conf := NodeSuite(t) - mempool := getMempool(t, n) - err := mempool.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- resp }, mempl.TxInfo{}) + m := getMempool(t, n) + err := m.CheckTx(ctx, tx, func(resp *abci.Response) { ch <- 
resp }, mempl.TxInfo{}) require.NoError(t, err) @@ -497,11 +498,11 @@ func TestUnconfirmedTxs(t *testing.T) { assert.Equal(t, 1, res.Count) assert.Equal(t, 1, res.Total) - assert.Equal(t, mempool.SizeBytes(), res.TotalBytes) - assert.Exactly(t, types.Txs{tx}, types.Txs(res.Txs)) + assert.Equal(t, m.SizeBytes(), res.TotalBytes) + assert.Exactly(t, mempool.Txs{tx}, mempool.Txs(res.Txs)) } - mempool.Flush() + m.Flush() } func TestNumUnconfirmedTxs(t *testing.T) { @@ -572,7 +573,7 @@ func TestTx(t *testing.T) { txHeight := bres.Height txHash := bres.Hash - anotherTxHash := types.Tx("a different tx").Hash() + anotherTxHash := mempool.Tx("a different tx").Hash() cases := []struct { valid bool @@ -652,7 +653,7 @@ func TestTxSearch(t *testing.T) { // pick out the last tx to have something to search for in tests find := result.Txs[len(result.Txs)-1] - anotherTxHash := types.Tx("a different tx").Hash() + anotherTxHash := mempool.Tx("a different tx").Hash() for i, c := range GetClients(t, n, conf) { t.Logf("client %d", i) diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 613eaec8b..5cebb7038 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -1,8 +1,8 @@ package core import ( - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 081276d0f..3d3dbb433 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -6,10 +6,11 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) // BlockchainInfo gets block headers for minHeight <= height <= maxHeight. @@ -40,7 +41,7 @@ func (env *Environment) BlockchainInfo( } env.Logger.Debug("BlockchainInfo", "maxHeight", maxHeight, "minHeight", minHeight) - blockMetas := make([]*types.BlockMeta, 0, maxHeight-minHeight+1) + blockMetas := make([]*block.BlockMeta, 0, maxHeight-minHeight+1) for height := maxHeight; height >= minHeight; height-- { blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta != nil { @@ -98,7 +99,7 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. blockMeta := env.BlockStore.LoadBlockMeta(height) if blockMeta == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &ctypes.ResultBlock{BlockID: metadata.BlockID{}, Block: nil}, nil } block := env.BlockStore.LoadBlock(height) @@ -110,7 +111,7 @@ func (env *Environment) Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes. func (env *Environment) BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error) { block := env.BlockStore.LoadBlockByHash(hash) if block == nil { - return &ctypes.ResultBlock{BlockID: types.BlockID{}, Block: nil}, nil + return &ctypes.ResultBlock{BlockID: metadata.BlockID{}, Block: nil}, nil } // If block is not nil, then blockMeta can't be nil. 
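A small illustrative check (not part of the patch) of the convention the two hunks above preserve under the new pkg/metadata types: a missing block is reported as a nil Block together with a zero metadata.BlockID, so callers can test for it as sketched here. The helper name is assumed.

package example

import (
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
)

// blockFound reports whether a ResultBlock actually carries a block: the
// Block and BlockByHash endpoints above return a nil Block and an empty
// metadata.BlockID (zero-length hash) when the height or hash is unknown.
func blockFound(res *ctypes.ResultBlock) bool {
	return res != nil && res.Block != nil && len(res.BlockID.Hash) > 0
}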
blockMeta := env.BlockStore.LoadBlockMeta(block.Height) diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 29db2f094..df470b149 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -9,12 +9,13 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) func TestBlockchainInfo(t *testing.T) { @@ -120,16 +121,16 @@ type mockBlockStore struct { height int64 } -func (mockBlockStore) Base() int64 { return 1 } -func (store mockBlockStore) Height() int64 { return store.height } -func (store mockBlockStore) Size() int64 { return store.height } -func (mockBlockStore) LoadBaseMeta() *types.BlockMeta { return nil } -func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } -func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } -func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } -func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } -func (mockBlockStore) LoadSeenCommit() *types.Commit { return nil } -func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } -func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (mockBlockStore) Base() int64 { return 1 } +func (store mockBlockStore) Height() int64 { return store.height } +func (store mockBlockStore) Size() int64 { return store.height } +func (mockBlockStore) LoadBaseMeta() *types.BlockMeta { return nil } +func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } +func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } +func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } +func (mockBlockStore) LoadBlockPart(height int64, index int) *metadata.Part { return nil } +func (mockBlockStore) LoadBlockCommit(height int64) *metadata.Commit { return nil } +func (mockBlockStore) LoadSeenCommit() *metadata.Commit { return nil } +func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } +func (mockBlockStore) SaveBlock(block *types.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index 1767c4b35..ab2e2114b 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -3,9 +3,9 @@ package core import ( cm "github.com/tendermint/tendermint/internal/consensus" tmmath "github.com/tendermint/tendermint/libs/math" + "github.com/tendermint/tendermint/pkg/consensus" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Validators gets the validator set at the given block height. 
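For orientation (illustrative, not part of the patch): the mock block store above already pulls its types from two of the new packages, and the blank-import sketch below records where the most common identifiers from the old monolithic types package land under pkg/, based solely on the import rewrites visible in this diff. The package name pkgmap is made up.

// Package pkgmap is a throwaway orientation file: the blank imports keep it
// compilable, and the comments map old types.* identifiers to their new homes
// under pkg/ as rewritten throughout this diff.
package pkgmap

import (
	_ "github.com/tendermint/tendermint/pkg/abci"      // RequestCheckTx, ResponseDeliverTx, Event (was abci/types)
	_ "github.com/tendermint/tendermint/pkg/block"     // Block, BlockMeta, MaxDataBytes
	_ "github.com/tendermint/tendermint/pkg/consensus" // Validator, ValidatorSet, Vote, GenesisDoc, ConsensusParams
	_ "github.com/tendermint/tendermint/pkg/events"    // EventBus, TMEventData, EventDataTx, EventDataNewBlock
	_ "github.com/tendermint/tendermint/pkg/evidence"  // Evidence, DuplicateVoteEvidence, LightClientAttackEvidence
	_ "github.com/tendermint/tendermint/pkg/light"     // LightBlock
	_ "github.com/tendermint/tendermint/pkg/mempool"   // Tx, Txs, TxProof
	_ "github.com/tendermint/tendermint/pkg/metadata"  // BlockID, PartSetHeader, Commit, Header, SignedHeader
	_ "github.com/tendermint/tendermint/pkg/p2p"       // NodeInfo, NodeInfoOther
)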
@@ -57,7 +57,7 @@ func (env *Environment) DumpConsensusState(ctx *rpctypes.Context) (*ctypes.Resul peers := env.P2PPeers.Peers().List() peerStates := make([]ctypes.PeerStateInfo, len(peers)) for i, peer := range peers { - peerState, ok := peer.Get(types.PeerStateKey).(*cm.PeerState) + peerState, ok := peer.Get(consensus.PeerStateKey).(*cm.PeerState) if !ok { // peer does not have a state yet continue } diff --git a/rpc/core/env.go b/rpc/core/env.go index eb7232c01..b9661c9c3 100644 --- a/rpc/core/env.go +++ b/rpc/core/env.go @@ -12,11 +12,13 @@ import ( "github.com/tendermint/tendermint/internal/p2p" tmjson "github.com/tendermint/tendermint/libs/json" "github.com/tendermint/tendermint/libs/log" + types "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) const ( @@ -47,7 +49,7 @@ type Consensus interface { type transport interface { Listeners() []string IsListening() bool - NodeInfo() types.NodeInfo + NodeInfo() p2ptypes.NodeInfo } type peers interface { @@ -79,7 +81,7 @@ type Environment struct { GenDoc *types.GenesisDoc // cache the genesis structure EventSinks []indexer.EventSink ConsensusReactor *consensus.Reactor - EventBus *types.EventBus // thread safe + EventBus *events.EventBus // thread safe Mempool mempl.Mempool BlockSyncReactor consensus.BlockSyncReactor diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index e071c5a7e..a0e8834a0 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -3,16 +3,16 @@ package core import ( "fmt" + "github.com/tendermint/tendermint/pkg/evidence" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // BroadcastEvidence broadcasts evidence of the misbehavior. // More: https://docs.tendermint.com/master/rpc/#/Evidence/broadcast_evidence func (env *Environment) BroadcastEvidence( ctx *rpctypes.Context, - ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { + ev evidence.Evidence) (*ctypes.ResultBroadcastEvidence, error) { if ev == nil { return nil, fmt.Errorf("%w: no evidence was provided", ctypes.ErrInvalidRequest) diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 690b0a295..70f6de1f2 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -6,12 +6,13 @@ import ( "fmt" "time" - abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/internal/mempool" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" + types "github.com/tendermint/tendermint/pkg/mempool" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- @@ -71,7 +72,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* // Subscribe to tx being committed in block. 
subCtx, cancel := context.WithTimeout(ctx.Context(), SubscribeTimeout) defer cancel() - q := types.EventQueryTxFor(tx) + q := events.EventQueryTxFor(tx) deliverTxSub, err := env.EventBus.Subscribe(subCtx, subscriber, q) if err != nil { err = fmt.Errorf("failed to subscribe to tx: %w", err) @@ -112,7 +113,7 @@ func (env *Environment) BroadcastTxCommit(ctx *rpctypes.Context, tx types.Tx) (* // Wait for the tx to be included in a block or timeout. select { case msg := <-deliverTxSub.Out(): // The tx was included in a block. - deliverTxRes := msg.Data().(types.EventDataTx) + deliverTxRes := msg.Data().(events.EventDataTx) return &ctypes.ResultBroadcastTxCommit{ CheckTx: *checkTxRes, DeliverTx: deliverTxRes.Result, diff --git a/rpc/core/status.go b/rpc/core/status.go index 815ab37f5..0e5bd0395 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -5,9 +5,9 @@ import ( "time" tmbytes "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/consensus" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" - "github.com/tendermint/tendermint/types" ) // Status returns Tendermint status including node info, pubkey, latest block @@ -80,7 +80,7 @@ func (env *Environment) Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, err return result, nil } -func (env *Environment) validatorAtHeight(h int64) *types.Validator { +func (env *Environment) validatorAtHeight(h int64) *consensus.Validator { valsWithH, err := env.StateStore.LoadValidators(h) if err != nil { return nil diff --git a/rpc/core/tx.go b/rpc/core/tx.go index 1b3da3075..71fa413c5 100644 --- a/rpc/core/tx.go +++ b/rpc/core/tx.go @@ -7,10 +7,10 @@ import ( tmmath "github.com/tendermint/tendermint/libs/math" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/mempool" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) // Tx allows you to query the transaction results. 
`nil` could mean the @@ -34,7 +34,7 @@ func (env *Environment) Tx(ctx *rpctypes.Context, hash []byte, prove bool) (*cty height := r.Height index := r.Index - var proof types.TxProof + var proof mempool.TxProof if prove { block := env.BlockStore.LoadBlock(height) proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines @@ -117,14 +117,14 @@ func (env *Environment) TxSearch( for i := skipCount; i < skipCount+pageSize; i++ { r := results[i] - var proof types.TxProof + var proof mempool.TxProof if prove { block := env.BlockStore.LoadBlock(r.Height) proof = block.Data.Txs.Proof(int(r.Index)) // XXX: overflow on 32-bit machines } apiResults = append(apiResults, &ctypes.ResultTx{ - Hash: types.Tx(r.Tx).Hash(), + Hash: mempool.Tx(r.Tx).Hash(), Height: r.Height, Index: r.Index, TxResult: r.Result, diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index a49e3c0d9..df6520637 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -5,12 +5,17 @@ import ( "errors" "time" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/libs/bytes" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" + p2ptypes "github.com/tendermint/tendermint/pkg/p2p" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // List of standardized errors used across RPC @@ -33,7 +38,7 @@ type ResultBlockchainInfo struct { // Genesis file type ResultGenesis struct { - Genesis *types.GenesisDoc `json:"genesis"` + Genesis *consensus.GenesisDoc `json:"genesis"` } // ResultGenesisChunk is the output format for the chunked/paginated @@ -48,14 +53,14 @@ type ResultGenesisChunk struct { // Single block (with meta) type ResultBlock struct { - BlockID types.BlockID `json:"block_id"` - Block *types.Block `json:"block"` + BlockID metadata.BlockID `json:"block_id"` + Block *types.Block `json:"block"` } // Commit and Header type ResultCommit struct { - types.SignedHeader `json:"signed_header"` - CanonicalCommit bool `json:"canonical"` + metadata.SignedHeader `json:"signed_header"` + CanonicalCommit bool `json:"canonical"` } // ABCI results from a block @@ -71,11 +76,11 @@ type ResultBlockResults struct { // NewResultCommit is a helper to initialize the ResultCommit with // the embedded struct -func NewResultCommit(header *types.Header, commit *types.Commit, +func NewResultCommit(header *metadata.Header, commit *metadata.Commit, canonical bool) *ResultCommit { return &ResultCommit{ - SignedHeader: types.SignedHeader{ + SignedHeader: metadata.SignedHeader{ Header: header, Commit: commit, }, @@ -112,9 +117,9 @@ type ValidatorInfo struct { // Node Status type ResultStatus struct { - NodeInfo types.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` + NodeInfo p2ptypes.NodeInfo `json:"node_info"` + SyncInfo SyncInfo `json:"sync_info"` + ValidatorInfo ValidatorInfo `json:"validator_info"` } // Is TxIndexing enabled @@ -145,7 +150,7 @@ type ResultDialPeers struct { // A peer type Peer struct { - NodeInfo types.NodeInfo `json:"node_info"` + NodeInfo p2ptypes.NodeInfo `json:"node_info"` IsOutbound bool 
`json:"is_outbound"` ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` RemoteIP string `json:"remote_ip"` @@ -153,8 +158,8 @@ type Peer struct { // Validators for a height. type ResultValidators struct { - BlockHeight int64 `json:"block_height"` - Validators []*types.Validator `json:"validators"` + BlockHeight int64 `json:"block_height"` + Validators []*consensus.Validator `json:"validators"` // Count of actual validators in this result Count int `json:"count"` // Total number of validators @@ -163,8 +168,8 @@ type ResultValidators struct { // ConsensusParams for given height type ResultConsensusParams struct { - BlockHeight int64 `json:"block_height"` - ConsensusParams types.ConsensusParams `json:"consensus_params"` + BlockHeight int64 `json:"block_height"` + ConsensusParams consensus.ConsensusParams `json:"consensus_params"` } // Info about the consensus state. @@ -215,8 +220,8 @@ type ResultTx struct { Height int64 `json:"height"` Index uint32 `json:"index"` TxResult abci.ResponseDeliverTx `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` + Tx mempool.Tx `json:"tx"` + Proof mempool.TxProof `json:"proof,omitempty"` } // Result of searching for txs @@ -233,10 +238,10 @@ type ResultBlockSearch struct { // List of mempool txs type ResultUnconfirmedTxs struct { - Count int `json:"n_txs"` - Total int `json:"total"` - TotalBytes int64 `json:"total_bytes"` - Txs []types.Tx `json:"txs"` + Count int `json:"n_txs"` + Total int `json:"total"` + TotalBytes int64 `json:"total_bytes"` + Txs []mempool.Tx `json:"txs"` } // Info abci msg @@ -265,8 +270,8 @@ type ( // Event data from a subscription type ResultEvent struct { - SubscriptionID string `json:"subscription_id"` - Query string `json:"query"` - Data types.TMEventData `json:"data"` - Events []abci.Event `json:"events"` + SubscriptionID string `json:"subscription_id"` + Query string `json:"query"` + Data events.TMEventData `json:"data"` + Events []abci.Event `json:"events"` } diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go index a85f3f777..c349d44d8 100644 --- a/rpc/core/types/responses_test.go +++ b/rpc/core/types/responses_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/p2p" ) func TestStatusIndexer(t *testing.T) { @@ -14,17 +14,17 @@ func TestStatusIndexer(t *testing.T) { status = &ResultStatus{} assert.False(t, status.TxIndexEnabled()) - status.NodeInfo = types.NodeInfo{} + status.NodeInfo = p2p.NodeInfo{} assert.False(t, status.TxIndexEnabled()) cases := []struct { expected bool - other types.NodeInfoOther + other p2p.NodeInfoOther }{ - {false, types.NodeInfoOther{}}, - {false, types.NodeInfoOther{TxIndex: "aa"}}, - {false, types.NodeInfoOther{TxIndex: "off"}}, - {true, types.NodeInfoOther{TxIndex: "on"}}, + {false, p2p.NodeInfoOther{}}, + {false, p2p.NodeInfoOther{TxIndex: "aa"}}, + {false, p2p.NodeInfoOther{TxIndex: "off"}}, + {true, p2p.NodeInfoOther{TxIndex: "on"}}, } for _, tc := range cases { diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go index 27f8c97e4..a71431a84 100644 --- a/rpc/grpc/api.go +++ b/rpc/grpc/api.go @@ -3,7 +3,7 @@ package coregrpc import ( "context" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" core "github.com/tendermint/tendermint/rpc/core" rpctypes "github.com/tendermint/tendermint/rpc/jsonrpc/types" ) diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go index 
b9cbee03f..ffd665949 100644 --- a/rpc/grpc/types.pb.go +++ b/rpc/grpc/types.pb.go @@ -7,7 +7,7 @@ import ( context "context" fmt "fmt" proto "github.com/gogo/protobuf/proto" - types "github.com/tendermint/tendermint/abci/types" + types "github.com/tendermint/tendermint/pkg/abci" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" diff --git a/rpc/openapi/openapi.yaml b/rpc/openapi/openapi.yaml index bb35d34ac..bda326d23 100644 --- a/rpc/openapi/openapi.yaml +++ b/rpc/openapi/openapi.yaml @@ -263,7 +263,7 @@ paths: DeliverTx response. import ( - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/libs/pubsub/query" ) diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go index 259450540..2286f7486 100644 --- a/rpc/test/helpers.go +++ b/rpc/test/helpers.go @@ -6,12 +6,12 @@ import ( "os" "time" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" tmnet "github.com/tendermint/tendermint/libs/net" "github.com/tendermint/tendermint/libs/service" nm "github.com/tendermint/tendermint/node" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/proxy" ctypes "github.com/tendermint/tendermint/rpc/core/types" core_grpc "github.com/tendermint/tendermint/rpc/grpc" diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index d21dc6c44..f1fcba507 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -16,7 +16,7 @@ import ( cs "github.com/tendermint/tendermint/internal/consensus" tmjson "github.com/tendermint/tendermint/libs/json" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/metadata" ) func main() { @@ -40,7 +40,7 @@ func main() { // the length of tendermint/wal/MsgInfo in the wal.json may exceed the defaultBufSize(4096) of bufio // because of the byte array in BlockPart // leading to unmarshal error: unexpected end of JSON input - br := bufio.NewReaderSize(f, int(2*types.BlockPartSizeBytes)) + br := bufio.NewReaderSize(f, int(2*metadata.BlockPartSizeBytes)) dec := cs.NewWALEncoder(walFile) for { diff --git a/state/execution.go b/state/execution.go index 05d5bdd52..a4633cb3f 100644 --- a/state/execution.go +++ b/state/execution.go @@ -6,14 +6,17 @@ import ( "fmt" "time" - abci "github.com/tendermint/tendermint/abci/types" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/libs/fail" mempl "github.com/tendermint/tendermint/internal/mempool" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" ) //----------------------------------------------------------------------------- @@ -33,7 +36,7 @@ type BlockExecutor struct { proxyApp proxy.AppConnConsensus // events - eventBus types.BlockEventPublisher + eventBus events.BlockEventPublisher // manage the mempool lock during commit // and update both with block results after commit. 
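To illustrate what the eventBus rewiring in the surrounding hunks implies for node setup (a sketch only, not part of the patch): the block executor now defaults to events.NopEventBus and a real bus is attached through SetEventBus. The sketch assumes the EventBus constructor and its service Start method kept their names after moving to pkg/events; only BlockExecutor, SetEventBus, and the events types appear in this diff.

package example

import (
	"github.com/tendermint/tendermint/pkg/events"
	sm "github.com/tendermint/tendermint/state"
)

// attachEventBus starts an event bus and points the block executor at it;
// without this call the executor keeps the default events.NopEventBus and
// publishes no block, header, tx, or validator-set events.
func attachEventBus(blockExec *sm.BlockExecutor) (*events.EventBus, error) {
	eventBus := events.NewEventBus() // assumed: NewEventBus moved along with EventBus
	if err := eventBus.Start(); err != nil {
		return nil, err
	}
	blockExec.SetEventBus(eventBus)
	return eventBus, nil
}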
@@ -69,7 +72,7 @@ func NewBlockExecutor( res := &BlockExecutor{ store: stateStore, proxyApp: proxyApp, - eventBus: types.NopEventBus{}, + eventBus: events.NopEventBus{}, mempool: mempool, evpool: evpool, logger: logger, @@ -91,7 +94,7 @@ func (blockExec *BlockExecutor) Store() Store { // SetEventBus - sets the event bus for publishing block related events. // If not called, it defaults to types.NopEventBus. -func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { +func (blockExec *BlockExecutor) SetEventBus(eventBus events.BlockEventPublisher) { blockExec.eventBus = eventBus } @@ -101,9 +104,9 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // The rest is given to txs, up to the max gas. func (blockExec *BlockExecutor) CreateProposalBlock( height int64, - state State, commit *types.Commit, + state State, commit *metadata.Commit, proposerAddr []byte, -) (*types.Block, *types.PartSet) { +) (*block.Block, *metadata.PartSet) { maxBytes := state.ConsensusParams.Block.MaxBytes maxGas := state.ConsensusParams.Block.MaxGas @@ -111,7 +114,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( evidence, evSize := blockExec.evpool.PendingEvidence(state.ConsensusParams.Evidence.MaxBytes) // Fetch a limited amount of valid txs - maxDataBytes := types.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) + maxDataBytes := block.MaxDataBytes(maxBytes, evSize, state.Validators.Size()) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) @@ -122,7 +125,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( // If the block is invalid, it returns an error. // Validation does not mutate state, but does require historical information from the stateDB, // ie. to verify evidence from a validator at an old height. -func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { +func (blockExec *BlockExecutor) ValidateBlock(state State, block *block.Block) error { hash := block.Hash() if _, ok := blockExec.cache[hash.String()]; ok { return nil @@ -149,7 +152,7 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) e // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock( - state State, blockID types.BlockID, block *types.Block, + state State, blockID metadata.BlockID, block *block.Block, ) (State, error) { // validate the block if we haven't already @@ -183,12 +186,12 @@ func (blockExec *BlockExecutor) ApplyBlock( return state, fmt.Errorf("error in validator updates: %v", err) } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciValUpdates) if err != nil { return state, err } if len(validatorUpdates) > 0 { - blockExec.logger.Debug("updates to validators", "updates", types.ValidatorListString(validatorUpdates)) + blockExec.logger.Debug("updates to validators", "updates", consensus.ValidatorListString(validatorUpdates)) } // Update the state with the block and responses. @@ -244,7 +247,7 @@ func (blockExec *BlockExecutor) ApplyBlock( // state before new txs are run in the mempool, lest they be invalid. 
func (blockExec *BlockExecutor) Commit( state State, - block *types.Block, + block *block.Block, deliverTxResponses []*abci.ResponseDeliverTx, ) ([]byte, int64, error) { blockExec.mempool.Lock() @@ -293,7 +296,7 @@ func (blockExec *BlockExecutor) Commit( func execBlockOnProxyApp( logger log.Logger, proxyAppConn proxy.AppConnConsensus, - block *types.Block, + block *block.Block, store Store, initialHeight int64, ) (*tmstate.ABCIResponses, error) { @@ -372,7 +375,7 @@ func execBlockOnProxyApp( return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, store Store, +func getBeginBlockValidatorInfo(block *block.Block, store Store, initialHeight int64) abci.LastCommitInfo { voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) // Initial block -> LastCommitInfo.Votes are empty. @@ -400,7 +403,7 @@ func getBeginBlockValidatorInfo(block *types.Block, store Store, for i, val := range lastValSet.Validators { commitSig := block.LastCommit.Signatures[i] voteInfos[i] = abci.VoteInfo{ - Validator: types.TM2PB.Validator(val), + Validator: consensus.TM2PB.Validator(val), SignedLastBlock: !commitSig.Absent(), } } @@ -413,7 +416,7 @@ func getBeginBlockValidatorInfo(block *types.Block, store Store, } func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, - params types.ValidatorParams) error { + params consensus.ValidatorParams) error { for _, valUpdate := range abciUpdates { if valUpdate.GetPower() < 0 { return fmt.Errorf("voting power can't be negative %v", valUpdate) @@ -440,10 +443,10 @@ func validateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, // updateState returns a new State updated according to the header and responses. func updateState( state State, - blockID types.BlockID, - header *types.Header, + blockID metadata.BlockID, + header *metadata.Header, abciResponses *tmstate.ABCIResponses, - validatorUpdates []*types.Validator, + validatorUpdates []*consensus.Validator, ) (State, error) { // Copy the valset so we can apply changes from EndBlock @@ -508,13 +511,13 @@ func updateState( // NOTE: if Tendermint crashes before commit, some or all of these events may be published again. 
func fireEvents( logger log.Logger, - eventBus types.BlockEventPublisher, - block *types.Block, - blockID types.BlockID, + eventBus events.BlockEventPublisher, + block *block.Block, + blockID metadata.BlockID, abciResponses *tmstate.ABCIResponses, - validatorUpdates []*types.Validator, + validatorUpdates []*consensus.Validator, ) { - if err := eventBus.PublishEventNewBlock(types.EventDataNewBlock{ + if err := eventBus.PublishEventNewBlock(events.EventDataNewBlock{ Block: block, BlockID: blockID, ResultBeginBlock: *abciResponses.BeginBlock, @@ -523,7 +526,7 @@ func fireEvents( logger.Error("failed publishing new block", "err", err) } - if err := eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ + if err := eventBus.PublishEventNewBlockHeader(events.EventDataNewBlockHeader{ Header: block.Header, NumTxs: int64(len(block.Txs)), ResultBeginBlock: *abciResponses.BeginBlock, @@ -534,7 +537,7 @@ func fireEvents( if len(block.Evidence.Evidence) != 0 { for _, ev := range block.Evidence.Evidence { - if err := eventBus.PublishEventNewEvidence(types.EventDataNewEvidence{ + if err := eventBus.PublishEventNewEvidence(events.EventDataNewEvidence{ Evidence: ev, Height: block.Height, }); err != nil { @@ -544,7 +547,7 @@ func fireEvents( } for i, tx := range block.Data.Txs { - if err := eventBus.PublishEventTx(types.EventDataTx{TxResult: abci.TxResult{ + if err := eventBus.PublishEventTx(events.EventDataTx{TxResult: abci.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, @@ -556,7 +559,7 @@ func fireEvents( if len(validatorUpdates) > 0 { if err := eventBus.PublishEventValidatorSetUpdates( - types.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil { + events.EventDataValidatorSetUpdates{ValidatorUpdates: validatorUpdates}); err != nil { logger.Error("failed publishing event", "err", err) } } @@ -570,7 +573,7 @@ func fireEvents( func ExecCommitBlock( be *BlockExecutor, appConnConsensus proxy.AppConnConsensus, - block *types.Block, + block *block.Block, logger log.Logger, store Store, initialHeight int64, @@ -590,13 +593,13 @@ func ExecCommitBlock( logger.Error("err", err) return nil, err } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciValUpdates) if err != nil { logger.Error("err", err) return nil, err } - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(types.BlockPartSizeBytes).Header()} + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(metadata.BlockPartSizeBytes).Header()} fireEvents(be.logger, be.eventBus, block, blockID, abciResponses, validatorUpdates) } diff --git a/state/execution_test.go b/state/execution_test.go index 8e0ec563a..1e324e904 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -9,7 +9,6 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" @@ -17,12 +16,18 @@ import ( mmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/abci" + bl "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/events" + 
"github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/mocks" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" dbm "github.com/tendermint/tm-db" ) @@ -46,8 +51,8 @@ func TestApplyBlock(t *testing.T) { blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, sm.EmptyEvidencePool{}, blockStore) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(state, 1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) @@ -69,34 +74,34 @@ func TestBeginBlockValidators(t *testing.T) { stateStore := sm.NewStore(stateDB) prevHash := state.LastBlockID.Hash - prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{Hash: prevHash, PartSetHeader: prevParts} + prevParts := metadata.PartSetHeader{} + prevBlockID := metadata.BlockID{Hash: prevHash, PartSetHeader: prevParts} var ( now = tmtime.Now() - commitSig0 = types.NewCommitSigForBlock( + commitSig0 = metadata.NewCommitSigForBlock( []byte("Signature1"), state.Validators.Validators[0].Address, now) - commitSig1 = types.NewCommitSigForBlock( + commitSig1 = metadata.NewCommitSigForBlock( []byte("Signature2"), state.Validators.Validators[1].Address, now) - absentSig = types.NewCommitSigAbsent() + absentSig = metadata.NewCommitSigAbsent() ) testCases := []struct { desc string - lastCommitSigs []types.CommitSig + lastCommitSigs []metadata.CommitSig expectedAbsentValidators []int }{ - {"none absent", []types.CommitSig{commitSig0, commitSig1}, []int{}}, - {"one absent", []types.CommitSig{commitSig0, absentSig}, []int{1}}, - {"multiple absent", []types.CommitSig{absentSig, absentSig}, []int{0, 1}}, + {"none absent", []metadata.CommitSig{commitSig0, commitSig1}, []int{}}, + {"one absent", []metadata.CommitSig{commitSig0, absentSig}, []int{1}}, + {"multiple absent", []metadata.CommitSig{absentSig, absentSig}, []int{0, 1}}, } for _, tc := range testCases { - lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) + lastCommit := metadata.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) // block for height 2 block := sf.MakeBlock(state, 2, lastCommit) @@ -134,7 +139,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { defaultEvidenceTime := time.Date(2019, 1, 1, 0, 0, 0, 0, time.UTC) privVal := privVals[state.Validators.Validators[0].Address.String()] blockID := makeBlockID([]byte("headerhash"), 1000, []byte("partshash")) - header := &types.Header{ + header := &metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 1}, ChainID: state.ChainID, Height: 10, @@ -152,42 +157,42 @@ func TestBeginBlockByzantineValidators(t *testing.T) { } // we don't need to worry about validating the evidence as long as they pass validate basic - dve := types.NewMockDuplicateVoteEvidenceWithValidator(3, defaultEvidenceTime, privVal, state.ChainID) + dve := evidence.NewMockDuplicateVoteEvidenceWithValidator(3, defaultEvidenceTime, privVal, state.ChainID) dve.ValidatorPower = 
1000 - lcae := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + lcae := &evidence.LightClientAttackEvidence{ + ConflictingBlock: &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: header, - Commit: types.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagNil, + Commit: metadata.NewCommit(10, 0, makeBlockID(header.Hash(), 100, []byte("partshash")), []metadata.CommitSig{{ + BlockIDFlag: metadata.BlockIDFlagNil, ValidatorAddress: crypto.AddressHash([]byte("validator_address")), Timestamp: defaultEvidenceTime, - Signature: crypto.CRandBytes(types.MaxSignatureSize), + Signature: crypto.CRandBytes(metadata.MaxSignatureSize), }}), }, ValidatorSet: state.Validators, }, CommonHeight: 8, - ByzantineValidators: []*types.Validator{state.Validators.Validators[0]}, + ByzantineValidators: []*consensus.Validator{state.Validators.Validators[0]}, TotalVotingPower: 12, Timestamp: defaultEvidenceTime, } - ev := []types.Evidence{dve, lcae} + ev := []evidence.Evidence{dve, lcae} abciEv := []abci.Evidence{ { Type: abci.EvidenceType_DUPLICATE_VOTE, Height: 3, Time: defaultEvidenceTime, - Validator: types.TM2PB.Validator(state.Validators.Validators[0]), + Validator: consensus.TM2PB.Validator(state.Validators.Validators[0]), TotalVotingPower: 10, }, { Type: abci.EvidenceType_LIGHT_CLIENT_ATTACK, Height: 8, Time: defaultEvidenceTime, - Validator: types.TM2PB.Validator(state.Validators.Validators[0]), + Validator: consensus.TM2PB.Validator(state.Validators.Validators[0]), TotalVotingPower: 12, }, } @@ -202,10 +207,10 @@ func TestBeginBlockByzantineValidators(t *testing.T) { blockExec := sm.NewBlockExecutor(stateStore, log.TestingLogger(), proxyApp.Consensus(), mmock.Mempool{}, evpool, blockStore) - block := sf.MakeBlock(state, 1, new(types.Commit)) - block.Evidence = types.EvidenceData{Evidence: ev} + block := sf.MakeBlock(state, 1, new(metadata.Commit)) + block.Evidence = bl.EvidenceData{Evidence: ev} block.Header.EvidenceHash = block.Evidence.Hash() - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + blockID = metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} _, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) @@ -222,13 +227,13 @@ func TestValidateValidatorUpdates(t *testing.T) { pk2, err := cryptoenc.PubKeyToProto(pubkey2) assert.NoError(t, err) - defaultValidatorParams := types.ValidatorParams{PubKeyTypes: []string{types.ABCIPubKeyTypeEd25519}} + defaultValidatorParams := consensus.ValidatorParams{PubKeyTypes: []string{consensus.ABCIPubKeyTypeEd25519}} testCases := []struct { name string abciUpdates []abci.ValidatorUpdate - validatorParams types.ValidatorParams + validatorParams consensus.ValidatorParams shouldErr bool }{ @@ -273,9 +278,9 @@ func TestValidateValidatorUpdates(t *testing.T) { func TestUpdateValidators(t *testing.T) { pubkey1 := ed25519.GenPrivKey().PubKey() - val1 := types.NewValidator(pubkey1, 10) + val1 := consensus.NewValidator(pubkey1, 10) pubkey2 := ed25519.GenPrivKey().PubKey() - val2 := types.NewValidator(pubkey2, 20) + val2 := consensus.NewValidator(pubkey2, 20) pk, err := cryptoenc.PubKeyToProto(pubkey1) require.NoError(t, err) @@ -285,38 +290,38 @@ func TestUpdateValidators(t *testing.T) { testCases := []struct { name string - currentSet *types.ValidatorSet + currentSet *consensus.ValidatorSet abciUpdates 
[]abci.ValidatorUpdate - resultingSet *types.ValidatorSet + resultingSet *consensus.ValidatorSet shouldErr bool }{ { "adding a validator is OK", - types.NewValidatorSet([]*types.Validator{val1}), + consensus.NewValidatorSet([]*consensus.Validator{val1}), []abci.ValidatorUpdate{{PubKey: pk2, Power: 20}}, - types.NewValidatorSet([]*types.Validator{val1, val2}), + consensus.NewValidatorSet([]*consensus.Validator{val1, val2}), false, }, { "updating a validator is OK", - types.NewValidatorSet([]*types.Validator{val1}), + consensus.NewValidatorSet([]*consensus.Validator{val1}), []abci.ValidatorUpdate{{PubKey: pk, Power: 20}}, - types.NewValidatorSet([]*types.Validator{types.NewValidator(pubkey1, 20)}), + consensus.NewValidatorSet([]*consensus.Validator{consensus.NewValidator(pubkey1, 20)}), false, }, { "removing a validator is OK", - types.NewValidatorSet([]*types.Validator{val1, val2}), + consensus.NewValidatorSet([]*consensus.Validator{val1, val2}), []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, - types.NewValidatorSet([]*types.Validator{val1}), + consensus.NewValidatorSet([]*consensus.Validator{val1}), false, }, { "removing a non-existing validator results in error", - types.NewValidatorSet([]*types.Validator{val1}), + consensus.NewValidatorSet([]*consensus.Validator{val1}), []abci.ValidatorUpdate{{PubKey: pk2, Power: 0}}, - types.NewValidatorSet([]*types.Validator{val1}), + consensus.NewValidatorSet([]*consensus.Validator{val1}), true, }, } @@ -324,7 +329,7 @@ func TestUpdateValidators(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - updates, err := types.PB2TM.ValidatorUpdates(tc.abciUpdates) + updates, err := consensus.PB2TM.ValidatorUpdates(tc.abciUpdates) assert.NoError(t, err) err = tc.currentSet.UpdateWithChangeSet(updates) if tc.shouldErr { @@ -366,7 +371,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { blockStore, ) - eventBus := types.NewEventBus() + eventBus := events.NewEventBus() err = eventBus.Start() require.NoError(t, err) defer eventBus.Stop() //nolint:errcheck // ignore for tests @@ -376,12 +381,12 @@ func TestEndBlockValidatorUpdates(t *testing.T) { updatesSub, err := eventBus.Subscribe( context.Background(), "TestEndBlockValidatorUpdates", - types.EventQueryValidatorSetUpdates, + events.EventQueryValidatorSetUpdates, ) require.NoError(t, err) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(state, 1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} pubkey := ed25519.GenPrivKey().PubKey() pk, err := cryptoenc.PubKeyToProto(pubkey) @@ -403,7 +408,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { // test we threw an event select { case msg := <-updatesSub.Out(): - event, ok := msg.Data().(types.EventDataValidatorSetUpdates) + event, ok := msg.Data().(events.EventDataValidatorSetUpdates) require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) if assert.NotEmpty(t, event.ValidatorUpdates) { assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) @@ -438,8 +443,8 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { blockStore, ) - block := sf.MakeBlock(state, 1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(state, 1, new(metadata.Commit)) + blockID := 
metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} vp, err := cryptoenc.PubKeyToProto(state.Validators.Validators[0].PubKey) require.NoError(t, err) @@ -453,16 +458,16 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { assert.NotEmpty(t, state.NextValidators.Validators) } -func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) metadata.BlockID { var ( h = make([]byte, tmhash.Size) psH = make([]byte, tmhash.Size) ) copy(h, hash) copy(psH, partSetHash) - return types.BlockID{ + return metadata.BlockID{ Hash: h, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: partSetSize, Hash: psH, }, diff --git a/state/export_test.go b/state/export_test.go index 90e7e32a7..3c77f18f8 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,9 +1,10 @@ package state import ( - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" - "github.com/tendermint/tendermint/types" ) // @@ -21,16 +22,16 @@ import ( // exclusively and explicitly for testing. func UpdateState( state State, - blockID types.BlockID, - header *types.Header, + blockID metadata.BlockID, + header *metadata.Header, abciResponses *tmstate.ABCIResponses, - validatorUpdates []*types.Validator, + validatorUpdates []*consensus.Validator, ) (State, error) { return updateState(state, blockID, header, abciResponses, validatorUpdates) } // ValidateValidatorUpdates is an alias for validateValidatorUpdates exported // from execution.go, exclusively and explicitly for testing. 
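The TestEndBlockValidatorUpdates hunk above shows that the event bus and its canned queries simply move from types to pkg/events; the subscription flow itself is unchanged. Below is a minimal sketch of that flow under the new import path, assuming EventBus, Subscribe, EventQueryValidatorSetUpdates and EventDataValidatorSetUpdates keep the signatures shown in the test; the function and subscriber names are hypothetical.

package sketch

// Illustrative only: mirrors the subscription pattern exercised by the test
// above, with the relocated pkg/events import path assumed from this patch.
import (
	"context"
	"fmt"

	"github.com/tendermint/tendermint/pkg/events"
)

func watchValidatorUpdates(ctx context.Context) error {
	eventBus := events.NewEventBus()
	if err := eventBus.Start(); err != nil {
		return err
	}
	defer eventBus.Stop() //nolint:errcheck

	sub, err := eventBus.Subscribe(ctx, "sketch-subscriber", events.EventQueryValidatorSetUpdates)
	if err != nil {
		return err
	}

	// Block until one validator-set update is published, as the test does.
	msg := <-sub.Out()
	update, ok := msg.Data().(events.EventDataValidatorSetUpdates)
	if !ok {
		return fmt.Errorf("unexpected event payload %T", msg.Data())
	}
	_ = update.ValidatorUpdates
	return nil
}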
-func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params types.ValidatorParams) error { +func ValidateValidatorUpdates(abciUpdates []abci.ValidatorUpdate, params consensus.ValidatorParams) error { return validateValidatorUpdates(abciUpdates, params) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 6d575e147..0395e837f 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -7,24 +7,26 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/types" ) type paramsChangeTestCase struct { height int64 - params types.ConsensusParams + params consensus.ConsensusParams } func newTestApp() proxy.AppConns { @@ -36,47 +38,47 @@ func newTestApp() proxy.AppConns { func makeAndCommitGoodBlock( state sm.State, height int64, - lastCommit *types.Commit, + lastCommit *metadata.Commit, proposerAddr []byte, blockExec *sm.BlockExecutor, - privVals map[string]types.PrivValidator, - evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) { + privVals map[string]consensus.PrivValidator, + evidence []evidence.Evidence) (sm.State, metadata.BlockID, *metadata.Commit, error) { // A good block passes state, blockID, err := makeAndApplyGoodBlock(state, height, lastCommit, proposerAddr, blockExec, evidence) if err != nil { - return state, types.BlockID{}, nil, err + return state, metadata.BlockID{}, nil, err } // Simulate a lastCommit for this block from all validators for the next height commit, err := makeValidCommit(height, blockID, state.Validators, privVals) if err != nil { - return state, types.BlockID{}, nil, err + return state, metadata.BlockID{}, nil, err } return state, blockID, commit, nil } -func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *types.Commit, proposerAddr []byte, - blockExec *sm.BlockExecutor, evidence []types.Evidence) (sm.State, types.BlockID, error) { +func makeAndApplyGoodBlock(state sm.State, height int64, lastCommit *metadata.Commit, proposerAddr []byte, + blockExec *sm.BlockExecutor, evidence []evidence.Evidence) (sm.State, metadata.BlockID, error) { block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) if err := blockExec.ValidateBlock(state, block); err != nil { - return state, types.BlockID{}, err + return state, metadata.BlockID{}, err } - blockID := types.BlockID{Hash: block.Hash(), - PartSetHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} + blockID := metadata.BlockID{Hash: block.Hash(), + PartSetHeader: metadata.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} state, err := blockExec.ApplyBlock(state, blockID, block) if err != nil { - return state, types.BlockID{}, err + return state, 
metadata.BlockID{}, err } return state, blockID, nil } func makeValidCommit( height int64, - blockID types.BlockID, - vals *types.ValidatorSet, - privVals map[string]types.PrivValidator, -) (*types.Commit, error) { - sigs := make([]types.CommitSig, 0) + blockID metadata.BlockID, + vals *consensus.ValidatorSet, + privVals map[string]consensus.PrivValidator, +) (*metadata.Commit, error) { + sigs := make([]metadata.CommitSig, 0) for i := 0; i < vals.Size(); i++ { _, val := vals.GetByIndex(int32(i)) vote, err := factory.MakeVote(privVals[val.Address.String()], chainID, int32(i), height, 0, 2, blockID, time.Now()) @@ -85,25 +87,25 @@ func makeValidCommit( } sigs = append(sigs, vote.CommitSig()) } - return types.NewCommit(height, 0, blockID, sigs), nil + return metadata.NewCommit(height, 0, blockID, sigs), nil } -func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValidator) { - vals := make([]types.GenesisValidator, nVals) - privVals := make(map[string]types.PrivValidator, nVals) +func makeState(nVals, height int) (sm.State, dbm.DB, map[string]consensus.PrivValidator) { + vals := make([]consensus.GenesisValidator, nVals) + privVals := make(map[string]consensus.PrivValidator, nVals) for i := 0; i < nVals; i++ { secret := []byte(fmt.Sprintf("test%d", i)) pk := ed25519.GenPrivKeyFromSecret(secret) valAddr := pk.PubKey().Address() - vals[i] = types.GenesisValidator{ + vals[i] = consensus.GenesisValidator{ Address: valAddr, PubKey: pk.PubKey(), Power: 1000, Name: fmt.Sprintf("test%d", i), } - privVals[valAddr.String()] = types.NewMockPVWithParams(pk, false, false) + privVals[valAddr.String()] = consensus.NewMockPVWithParams(pk, false, false) } - s, _ := sm.MakeGenesisState(&types.GenesisDoc{ + s, _ := sm.MakeGenesisState(&consensus.GenesisDoc{ ChainID: chainID, Validators: vals, AppHash: nil, @@ -126,20 +128,20 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida return s, stateDB, privVals } -func genValSet(size int) *types.ValidatorSet { - vals := make([]*types.Validator, size) +func genValSet(size int) *consensus.ValidatorSet { + vals := make([]*consensus.Validator, size) for i := 0; i < size; i++ { - vals[i] = types.NewValidator(ed25519.GenPrivKey().PubKey(), 10) + vals[i] = consensus.NewValidator(ed25519.GenPrivKey().PubKey(), 10) } - return types.NewValidatorSet(vals) + return consensus.NewValidatorSet(vals) } func makeHeaderPartsResponsesValPubKeyChange( state sm.State, pubkey crypto.PubKey, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { +) (metadata.Header, metadata.BlockID, *tmstate.ABCIResponses) { - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(metadata.Commit)) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -163,15 +165,15 @@ func makeHeaderPartsResponsesValPubKeyChange( } } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, metadata.BlockID{Hash: block.Hash(), PartSetHeader: metadata.PartSetHeader{}}, abciResponses } func makeHeaderPartsResponsesValPowerChange( state sm.State, power int64, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { +) (metadata.Header, metadata.BlockID, *tmstate.ABCIResponses) { - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(metadata.Commit)) 
abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, @@ -191,29 +193,29 @@ func makeHeaderPartsResponsesValPowerChange( } } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, metadata.BlockID{Hash: block.Hash(), PartSetHeader: metadata.PartSetHeader{}}, abciResponses } func makeHeaderPartsResponsesParams( state sm.State, - params *types.ConsensusParams, -) (types.Header, types.BlockID, *tmstate.ABCIResponses) { + params *consensus.ConsensusParams, +) (metadata.Header, metadata.BlockID, *tmstate.ABCIResponses) { - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(metadata.Commit)) pbParams := params.ToProto() abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: &pbParams}, } - return block.Header, types.BlockID{Hash: block.Hash(), PartSetHeader: types.PartSetHeader{}}, abciResponses + return block.Header, metadata.BlockID{Hash: block.Hash(), PartSetHeader: metadata.PartSetHeader{}}, abciResponses } -func randomGenesisDoc() *types.GenesisDoc { +func randomGenesisDoc() *consensus.GenesisDoc { pubkey := ed25519.GenPrivKey().PubKey() - return &types.GenesisDoc{ + return &consensus.GenesisDoc{ GenesisTime: tmtime.Now(), ChainID: "abc", - Validators: []types.GenesisValidator{ + Validators: []consensus.GenesisValidator{ { Address: pubkey.Address(), PubKey: pubkey, @@ -221,13 +223,13 @@ func randomGenesisDoc() *types.GenesisDoc { Name: "myval", }, }, - ConsensusParams: types.DefaultConsensusParams(), + ConsensusParams: consensus.DefaultConsensusParams(), } } // used for testing by state store func makeRandomStateFromValidatorSet( - lastValSet *types.ValidatorSet, + lastValSet *consensus.ValidatorSet, height, lastHeightValidatorsChanged int64, ) sm.State { return sm.State{ @@ -236,16 +238,16 @@ func makeRandomStateFromValidatorSet( Validators: lastValSet.CopyIncrementProposerPriority(1), LastValidators: lastValSet.Copy(), LastHeightConsensusParamsChanged: height, - ConsensusParams: *types.DefaultConsensusParams(), + ConsensusParams: *consensus.DefaultConsensusParams(), LastHeightValidatorsChanged: lastHeightValidatorsChanged, InitialHeight: 1, } } -func makeRandomStateFromConsensusParams(consensusParams *types.ConsensusParams, +func makeRandomStateFromConsensusParams(consensusParams *consensus.ConsensusParams, height, lastHeightConsensusParamsChanged int64) sm.State { val, _ := factory.RandValidator(true, 10) - valSet := types.NewValidatorSet([]*types.Validator{val}) + valSet := consensus.NewValidatorSet([]*consensus.Validator{val}) return sm.State{ LastBlockHeight: height - 1, ConsensusParams: *consensusParams, diff --git a/state/indexer/block/kv/kv.go b/state/indexer/block/kv/kv.go index bc90eadf5..823da1795 100644 --- a/state/indexer/block/kv/kv.go +++ b/state/indexer/block/kv/kv.go @@ -11,10 +11,10 @@ import ( "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) @@ -49,7 +49,7 @@ func (idx *BlockerIndexer) 
Has(height int64) (bool, error) { // primary key: encode(block.height | height) => encode(height) // BeginBlock events: encode(eventType.eventAttr|eventValue|height|begin_block) => encode(height) // EndBlock events: encode(eventType.eventAttr|eventValue|height|end_block) => encode(height) -func (idx *BlockerIndexer) Index(bh types.EventDataNewBlockHeader) error { +func (idx *BlockerIndexer) Index(bh events.EventDataNewBlockHeader) error { batch := idx.store.NewBatch() defer batch.Close() @@ -248,7 +248,7 @@ iter: err error ) - if qr.Key == types.BlockHeightKey { + if qr.Key == events.BlockHeightKey { eventValue, err = parseValueFromPrimaryKey(it.Key()) } else { eventValue, err = parseValueFromEventKey(it.Key()) @@ -456,10 +456,10 @@ func (idx *BlockerIndexer) match( return filteredHeights, nil } -func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ string, height int64) error { +func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, ev []abci.Event, typ string, height int64) error { heightBz := int64ToBytes(height) - for _, event := range events { + for _, event := range ev { // only index events with a non-empty type if len(event.Type) == 0 { continue @@ -472,7 +472,7 @@ func (idx *BlockerIndexer) indexEvents(batch dbm.Batch, events []abci.Event, typ // index iff the event specified index:true and it's not a reserved event compositeKey := fmt.Sprintf("%s.%s", event.Type, attr.Key) - if compositeKey == types.BlockHeightKey { + if compositeKey == events.BlockHeightKey { return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeKey) } diff --git a/state/indexer/block/kv/kv_test.go b/state/indexer/block/kv/kv_test.go index 2a342f870..8bdbfe027 100644 --- a/state/indexer/block/kv/kv_test.go +++ b/state/indexer/block/kv/kv_test.go @@ -6,10 +6,11 @@ import ( "testing" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/metadata" blockidxkv "github.com/tendermint/tendermint/state/indexer/block/kv" - "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) @@ -17,8 +18,8 @@ func TestBlockIndexer(t *testing.T) { store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) indexer := blockidxkv.New(store) - require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ - Header: types.Header{Height: 1}, + require.NoError(t, indexer.Index(events.EventDataNewBlockHeader{ + Header: metadata.Header{Height: 1}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ { @@ -55,8 +56,8 @@ func TestBlockIndexer(t *testing.T) { index = true } - require.NoError(t, indexer.Index(types.EventDataNewBlockHeader{ - Header: types.Header{Height: int64(i)}, + require.NoError(t, indexer.Index(events.EventDataNewBlockHeader{ + Header: metadata.Header{Height: int64(i)}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ { diff --git a/state/indexer/block/kv/util.go b/state/indexer/block/kv/util.go index c0b88018e..3868afa21 100644 --- a/state/indexer/block/kv/util.go +++ b/state/indexer/block/kv/util.go @@ -7,7 +7,7 @@ import ( "github.com/google/orderedcode" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/events" ) func intInSlice(a int, list []int) bool { @@ -34,7 +34,7 @@ func 
int64ToBytes(i int64) []byte { func heightKey(height int64) ([]byte, error) { return orderedcode.Append( nil, - types.BlockHeightKey, + events.BlockHeightKey, height, ) } @@ -87,7 +87,7 @@ func parseValueFromEventKey(key []byte) (string, error) { func lookForHeight(conditions []query.Condition) (int64, bool) { for _, c := range conditions { - if c.CompositeKey == types.BlockHeightKey && c.Op == query.OpEqual { + if c.CompositeKey == events.BlockHeightKey && c.Op == query.OpEqual { return c.Operand.(int64), true } } diff --git a/state/indexer/block/null/null.go b/state/indexer/block/null/null.go index d36d8680e..3d3bdf532 100644 --- a/state/indexer/block/null/null.go +++ b/state/indexer/block/null/null.go @@ -5,8 +5,8 @@ import ( "errors" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var _ indexer.BlockIndexer = (*BlockerIndexer)(nil) @@ -18,7 +18,7 @@ func (idx *BlockerIndexer) Has(height int64) (bool, error) { return false, errors.New(`indexing is disabled (set 'tx_index = "kv"' in config)`) } -func (idx *BlockerIndexer) Index(types.EventDataNewBlockHeader) error { +func (idx *BlockerIndexer) Index(events.EventDataNewBlockHeader) error { return nil } diff --git a/state/indexer/eventsink.go b/state/indexer/eventsink.go index d7c9d10e0..f7dc99cf5 100644 --- a/state/indexer/eventsink.go +++ b/state/indexer/eventsink.go @@ -3,9 +3,9 @@ package indexer import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" ) type EventSinkType string @@ -26,7 +26,7 @@ const ( type EventSink interface { // IndexBlockEvents indexes the blockheader. - IndexBlockEvents(types.EventDataNewBlockHeader) error + IndexBlockEvents(events.EventDataNewBlockHeader) error // IndexTxEvents indexes the given result of transactions. To call it with multi transactions, // must guarantee the index of given transactions are in order. diff --git a/state/indexer/indexer.go b/state/indexer/indexer.go index 24dc62d70..0fac63371 100644 --- a/state/indexer/indexer.go +++ b/state/indexer/indexer.go @@ -4,9 +4,9 @@ import ( "context" "errors" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" ) // TxIndexer interface defines methods to index and search transactions. @@ -31,7 +31,7 @@ type BlockIndexer interface { Has(height int64) (bool, error) // Index indexes BeginBlock and EndBlock events for a given block by its height. - Index(types.EventDataNewBlockHeader) error + Index(events.EventDataNewBlockHeader) error // Search performs a query for block heights that match a given BeginBlock // and Endblock event search criteria. diff --git a/state/indexer/indexer_service.go b/state/indexer/indexer_service.go index a429b66a0..28e22567b 100644 --- a/state/indexer/indexer_service.go +++ b/state/indexer/indexer_service.go @@ -4,7 +4,7 @@ import ( "context" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/events" ) // XXX/TODO: These types should be moved to the indexer package. 
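The eventsink.go and indexer.go hunks above touch only signatures: sinks now receive events.EventDataNewBlockHeader and []*abci.TxResult from the new packages. A short sketch of the two indexing methods a sink implementation now declares follows; the discardSink type is hypothetical, and this is not the full indexer.EventSink interface (the search, hash-lookup, Type and Stop methods are unchanged and omitted here).

package sketch

// Illustrative only: shows just the two indexing methods whose parameter
// types move to pkg/abci and pkg/events in this patch.
import (
	"github.com/tendermint/tendermint/pkg/abci"
	"github.com/tendermint/tendermint/pkg/events"
)

type discardSink struct{}

// IndexBlockEvents matches the relocated events.EventDataNewBlockHeader type.
func (discardSink) IndexBlockEvents(events.EventDataNewBlockHeader) error { return nil }

// IndexTxEvents matches the relocated abci.TxResult type.
func (discardSink) IndexTxEvents([]*abci.TxResult) error { return nil }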
@@ -19,11 +19,11 @@ type Service struct { service.BaseService eventSinks []EventSink - eventBus *types.EventBus + eventBus *events.EventBus } // NewIndexerService returns a new service instance. -func NewIndexerService(es []EventSink, eventBus *types.EventBus) *Service { +func NewIndexerService(es []EventSink, eventBus *events.EventBus) *Service { is := &Service{eventSinks: es, eventBus: eventBus} is.BaseService = *service.NewBaseService(nil, "IndexerService", is) @@ -39,12 +39,12 @@ func (is *Service) OnStart() error { blockHeadersSub, err := is.eventBus.SubscribeUnbuffered( context.Background(), subscriber, - types.EventQueryNewBlockHeader) + events.EventQueryNewBlockHeader) if err != nil { return err } - txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, types.EventQueryTx) + txsSub, err := is.eventBus.SubscribeUnbuffered(context.Background(), subscriber, events.EventQueryTx) if err != nil { return err } @@ -53,13 +53,13 @@ func (is *Service) OnStart() error { for { msg := <-blockHeadersSub.Out() - eventDataHeader := msg.Data().(types.EventDataNewBlockHeader) + eventDataHeader := msg.Data().(events.EventDataNewBlockHeader) height := eventDataHeader.Header.Height batch := NewBatch(eventDataHeader.NumTxs) for i := int64(0); i < eventDataHeader.NumTxs; i++ { msg2 := <-txsSub.Out() - txResult := msg2.Data().(types.EventDataTx).TxResult + txResult := msg2.Data().(events.EventDataTx).TxResult if err = batch.Add(&txResult); err != nil { is.Logger.Error( diff --git a/state/indexer/indexer_service_test.go b/state/indexer/indexer_service_test.go index 68a00afb5..ee98320a9 100644 --- a/state/indexer/indexer_service_test.go +++ b/state/indexer/indexer_service_test.go @@ -15,12 +15,14 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" tmlog "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" indexer "github.com/tendermint/tendermint/state/indexer" kv "github.com/tendermint/tendermint/state/indexer/sink/kv" psql "github.com/tendermint/tendermint/state/indexer/sink/psql" - "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) @@ -72,14 +74,14 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { // publish block with txs err = eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{ - Header: types.Header{Height: 1}, + Header: metadata.Header{Height: 1}, NumTxs: int64(2), }) require.NoError(t, err) txResult1 := &abci.TxResult{ Height: 1, Index: uint32(0), - Tx: types.Tx("foo"), + Tx: mempool.Tx("foo"), Result: abci.ResponseDeliverTx{Code: 0}, } err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult1}) @@ -87,7 +89,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { txResult2 := &abci.TxResult{ Height: 1, Index: uint32(1), - Tx: types.Tx("bar"), + Tx: mempool.Tx("bar"), Result: abci.ResponseDeliverTx{Code: 0}, } err = eventBus.PublishEventTx(types.EventDataTx{TxResult: *txResult2}) @@ -95,7 +97,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { time.Sleep(100 * time.Millisecond) - res, err := eventSinks[0].GetTxByHash(types.Tx("foo").Hash()) + res, err := eventSinks[0].GetTxByHash(mempool.Tx("foo").Hash()) require.NoError(t, err) require.Equal(t, txResult1, res) @@ -103,7 +105,7 @@ func TestIndexerServiceIndexesBlocks(t *testing.T) { 
require.NoError(t, err) require.True(t, ok) - res, err = eventSinks[0].GetTxByHash(types.Tx("bar").Hash()) + res, err = eventSinks[0].GetTxByHash(mempool.Tx("bar").Hash()) require.NoError(t, err) require.Equal(t, txResult2, res) diff --git a/state/indexer/mocks/event_sink.go b/state/indexer/mocks/event_sink.go index ce5b8ace5..a5c150a58 100644 --- a/state/indexer/mocks/event_sink.go +++ b/state/indexer/mocks/event_sink.go @@ -5,14 +5,15 @@ package mocks import ( context "context" - mock "github.com/stretchr/testify/mock" + abci "github.com/tendermint/tendermint/pkg/abci" + + events "github.com/tendermint/tendermint/pkg/events" + indexer "github.com/tendermint/tendermint/state/indexer" + mock "github.com/stretchr/testify/mock" + query "github.com/tendermint/tendermint/libs/pubsub/query" - - tenderminttypes "github.com/tendermint/tendermint/types" - - types "github.com/tendermint/tendermint/abci/types" ) // EventSink is an autogenerated mock type for the EventSink type @@ -21,15 +22,15 @@ type EventSink struct { } // GetTxByHash provides a mock function with given fields: _a0 -func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { +func (_m *EventSink) GetTxByHash(_a0 []byte) (*abci.TxResult, error) { ret := _m.Called(_a0) - var r0 *types.TxResult - if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { + var r0 *abci.TxResult + if rf, ok := ret.Get(0).(func([]byte) *abci.TxResult); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.TxResult) + r0 = ret.Get(0).(*abci.TxResult) } } @@ -65,11 +66,11 @@ func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { } // IndexBlockEvents provides a mock function with given fields: _a0 -func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeader) error { +func (_m *EventSink) IndexBlockEvents(_a0 events.EventDataNewBlockHeader) error { ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(tenderminttypes.EventDataNewBlockHeader) error); ok { + if rf, ok := ret.Get(0).(func(events.EventDataNewBlockHeader) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -79,11 +80,11 @@ func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeade } // IndexTxEvents provides a mock function with given fields: _a0 -func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { +func (_m *EventSink) IndexTxEvents(_a0 []*abci.TxResult) error { ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok { + if rf, ok := ret.Get(0).(func([]*abci.TxResult) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -116,15 +117,15 @@ func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([ } // SearchTxEvents provides a mock function with given fields: _a0, _a1 -func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*types.TxResult, error) { +func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*abci.TxResult, error) { ret := _m.Called(_a0, _a1) - var r0 []*types.TxResult - if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { + var r0 []*abci.TxResult + if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*abci.TxResult); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.TxResult) + r0 = ret.Get(0).([]*abci.TxResult) } } diff --git a/state/indexer/sink/kv/kv.go b/state/indexer/sink/kv/kv.go index 7d51640d8..8d72d9801 100644 --- a/state/indexer/sink/kv/kv.go +++ 
b/state/indexer/sink/kv/kv.go @@ -3,12 +3,12 @@ package kv import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/state/indexer" kvb "github.com/tendermint/tendermint/state/indexer/block/kv" kvt "github.com/tendermint/tendermint/state/indexer/tx/kv" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" ) @@ -32,7 +32,7 @@ func (kves *EventSink) Type() indexer.EventSinkType { return indexer.KV } -func (kves *EventSink) IndexBlockEvents(bh types.EventDataNewBlockHeader) error { +func (kves *EventSink) IndexBlockEvents(bh events.EventDataNewBlockHeader) error { return kves.bi.Index(bh) } diff --git a/state/indexer/sink/kv/kv_test.go b/state/indexer/sink/kv/kv_test.go index a5d2dd81e..c0cb138ae 100644 --- a/state/indexer/sink/kv/kv_test.go +++ b/state/indexer/sink/kv/kv_test.go @@ -8,11 +8,13 @@ import ( "github.com/gogo/protobuf/proto" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/state/indexer" kvtx "github.com/tendermint/tendermint/state/indexer/tx/kv" - "github.com/tendermint/tendermint/types" db "github.com/tendermint/tm-db" ) @@ -30,8 +32,8 @@ func TestBlockFuncs(t *testing.T) { store := db.NewPrefixDB(db.NewMemDB(), []byte("block_events")) indexer := NewEventSink(store) - require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ - Header: types.Header{Height: 1}, + require.NoError(t, indexer.IndexBlockEvents(events.EventDataNewBlockHeader{ + Header: metadata.Header{Height: 1}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ { @@ -72,8 +74,8 @@ func TestBlockFuncs(t *testing.T) { index = true } - require.NoError(t, indexer.IndexBlockEvents(types.EventDataNewBlockHeader{ - Header: types.Header{Height: int64(i)}, + require.NoError(t, indexer.IndexBlockEvents(events.EventDataNewBlockHeader{ + Header: metadata.Header{Height: int64(i)}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ { @@ -168,7 +170,7 @@ func TestTxSearchWithCancelation(t *testing.T) { err := indexer.IndexTxEvents([]*abci.TxResult{txResult}) require.NoError(t, err) - r, e := indexer.GetTxByHash(types.Tx("HELLO WORLD").Hash()) + r, e := indexer.GetTxByHash(mempool.Tx("HELLO WORLD").Hash()) assert.Nil(t, e) assert.Equal(t, r, txResult) @@ -187,16 +189,16 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { txResult1 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, }) - hash1 := types.Tx(txResult1.Tx).Hash() + hash1 := mempool.Tx(txResult1.Tx).Hash() err := indexer.IndexTxEvents([]*abci.TxResult{txResult1}) require.NoError(t, err) // index tx also using deprecated indexing (event as key) txResult2 := txResultWithEvents(nil) - txResult2.Tx = types.Tx("HELLO WORLD 2") + txResult2.Tx = mempool.Tx("HELLO WORLD 2") - hash2 := types.Tx(txResult2.Tx).Hash() + hash2 := mempool.Tx(txResult2.Tx).Hash() b := esdb.NewBatch() rawBytes, err := proto.Marshal(txResult2) @@ -289,7 +291,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { 
{Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, }) - txResult.Tx = types.Tx("Bob's account") + txResult.Tx = mempool.Tx("Bob's account") txResult.Height = 2 txResult.Index = 1 err := indexer.IndexTxEvents([]*abci.TxResult{txResult}) @@ -299,7 +301,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult2 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "2", Index: true}}}, }) - txResult2.Tx = types.Tx("Alice's account") + txResult2.Tx = mempool.Tx("Alice's account") txResult2.Height = 1 txResult2.Index = 2 @@ -310,7 +312,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult3 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "3", Index: true}}}, }) - txResult3.Tx = types.Tx("Jack's account") + txResult3.Tx = mempool.Tx("Jack's account") txResult3.Height = 1 txResult3.Index = 1 err = indexer.IndexTxEvents([]*abci.TxResult{txResult3}) @@ -321,7 +323,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult4 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number.id", Value: "1", Index: true}}}, }) - txResult4.Tx = types.Tx("Mike's account") + txResult4.Tx = mempool.Tx("Mike's account") txResult4.Height = 2 txResult4.Index = 2 err = indexer.IndexTxEvents([]*abci.TxResult{txResult4}) @@ -336,7 +338,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { } func txResultWithEvents(events []abci.Event) *abci.TxResult { - tx := types.Tx("HELLO WORLD") + tx := mempool.Tx("HELLO WORLD") return &abci.TxResult{ Height: 1, Index: 0, diff --git a/state/indexer/sink/null/null.go b/state/indexer/sink/null/null.go index b5ad93ab4..685fdef52 100644 --- a/state/indexer/sink/null/null.go +++ b/state/indexer/sink/null/null.go @@ -3,10 +3,10 @@ package null import ( "context" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var _ indexer.EventSink = (*EventSink)(nil) @@ -22,7 +22,7 @@ func (nes *EventSink) Type() indexer.EventSinkType { return indexer.NULL } -func (nes *EventSink) IndexBlockEvents(bh types.EventDataNewBlockHeader) error { +func (nes *EventSink) IndexBlockEvents(bh events.EventDataNewBlockHeader) error { return nil } diff --git a/state/indexer/sink/null/null_test.go b/state/indexer/sink/null/null_test.go index eef63fd6e..6a9f60c64 100644 --- a/state/indexer/sink/null/null_test.go +++ b/state/indexer/sink/null/null_test.go @@ -5,15 +5,15 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/pkg/events" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) func TestNullEventSink(t *testing.T) { nullIndexer := NewEventSink() assert.Nil(t, nullIndexer.IndexTxEvents(nil)) - assert.Nil(t, nullIndexer.IndexBlockEvents(types.EventDataNewBlockHeader{})) + assert.Nil(t, nullIndexer.IndexBlockEvents(events.EventDataNewBlockHeader{})) val1, err1 := nullIndexer.SearchBlockEvents(context.TODO(), nil) assert.Nil(t, val1) assert.Nil(t, err1) diff --git a/state/indexer/sink/psql/psql.go b/state/indexer/sink/psql/psql.go index efb539e0b..9898dd222 100644 --- a/state/indexer/sink/psql/psql.go +++ b/state/indexer/sink/psql/psql.go @@ -9,10 +9,11 @@ import ( sq 
"github.com/Masterminds/squirrel" proto "github.com/gogo/protobuf/proto" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var _ indexer.EventSink = (*EventSink)(nil) @@ -121,7 +122,7 @@ func (es *EventSink) IndexTxEvents(txr []*abci.TxResult) error { } // index the reserved height and hash indices - hash := fmt.Sprintf("%X", types.Tx(tx.Tx).Hash()) + hash := fmt.Sprintf("%X", mempool.Tx(tx.Tx).Hash()) sqlStmtEvents = sqlStmtEvents.Values(types.TxHashKey, hash, tx.Height, hash, txid, ts, es.chainID) sqlStmtEvents = sqlStmtEvents.Values(types.TxHeightKey, fmt.Sprint(tx.Height), tx.Height, hash, txid, ts, es.chainID) diff --git a/state/indexer/sink/psql/psql_test.go b/state/indexer/sink/psql/psql_test.go index 0df773a53..d742d639e 100644 --- a/state/indexer/sink/psql/psql_test.go +++ b/state/indexer/sink/psql/psql_test.go @@ -18,9 +18,11 @@ import ( "github.com/ory/dockertest/docker" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var db *sql.DB @@ -94,14 +96,14 @@ func TestTxFuncs(t *testing.T) { err = indexer.IndexTxEvents([]*abci.TxResult{txResult}) require.NoError(t, err) - tx, err := verifyTx(types.Tx(txResult.Tx).Hash()) + tx, err := verifyTx(mempool.Tx(txResult.Tx).Hash()) require.NoError(t, err) assert.Equal(t, txResult, tx) require.NoError(t, verifyTimeStamp(TableEventTx)) require.NoError(t, verifyTimeStamp(TableResultTx)) - tx, err = indexer.GetTxByHash(types.Tx(txResult.Tx).Hash()) + tx, err = indexer.GetTxByHash(mempool.Tx(txResult.Tx).Hash()) assert.Nil(t, tx) assert.Equal(t, errors.New("getTxByHash is not supported via the postgres event sink"), err) @@ -129,7 +131,7 @@ func TestStop(t *testing.T) { func getTestBlockHeader() types.EventDataNewBlockHeader { return types.EventDataNewBlockHeader{ - Header: types.Header{Height: 1}, + Header: metadata.Header{Height: 1}, ResultBeginBlock: abci.ResponseBeginBlock{ Events: []abci.Event{ { @@ -187,7 +189,7 @@ func resetDB(t *testing.T) { } func txResultWithEvents(events []abci.Event) *abci.TxResult { - tx := types.Tx("HELLO WORLD") + tx := mempool.Tx("HELLO WORLD") return &abci.TxResult{ Height: 1, Index: 0, diff --git a/state/indexer/tx/kv/kv.go b/state/indexer/tx/kv/kv.go index 080dbce2c..742c53382 100644 --- a/state/indexer/tx/kv/kv.go +++ b/state/indexer/tx/kv/kv.go @@ -11,10 +11,11 @@ import ( "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/events" + "github.com/tendermint/tendermint/pkg/mempool" indexer "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) var _ indexer.TxIndexer = (*TxIndex)(nil) @@ -67,7 +68,7 @@ func (txi *TxIndex) Index(results []*abci.TxResult) error { defer b.Close() for _, result := range results { - 
hash := types.Tx(result.Tx).Hash() + hash := mempool.Tx(result.Tx).Hash() // index tx by events err := txi.indexEvents(result, hash, b) @@ -110,7 +111,7 @@ func (txi *TxIndex) indexEvents(result *abci.TxResult, hash []byte, store dbm.Ba // index if `index: true` is set compositeTag := fmt.Sprintf("%s.%s", event.Type, attr.Key) // ensure event does not conflict with a reserved prefix key - if compositeTag == types.TxHashKey || compositeTag == types.TxHeightKey { + if compositeTag == events.TxHashKey || compositeTag == events.TxHeightKey { return fmt.Errorf("event type and attribute key \"%s\" is reserved; please use a different key", compositeTag) } if attr.GetIndex() { @@ -240,7 +241,7 @@ hashes: func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) { for _, c := range conditions { - if c.CompositeKey == types.TxHashKey { + if c.CompositeKey == events.TxHashKey { decoded, err := hex.DecodeString(c.Operand.(string)) return decoded, true, err } @@ -251,7 +252,7 @@ func lookForHash(conditions []query.Condition) (hash []byte, ok bool, err error) // lookForHeight returns a height if there is an "height=X" condition. func lookForHeight(conditions []query.Condition) (height int64) { for _, c := range conditions { - if c.CompositeKey == types.TxHeightKey && c.Op == query.OpEqual { + if c.CompositeKey == events.TxHeightKey && c.Op == query.OpEqual { return c.Operand.(int64) } } @@ -507,7 +508,7 @@ iter: func primaryKey(hash []byte) []byte { key, err := orderedcode.Append( nil, - types.TxHashKey, + events.TxHashKey, string(hash), ) if err != nil { @@ -554,7 +555,7 @@ func keyFromEvent(compositeKey string, value string, result *abci.TxResult) []by } func KeyFromHeight(result *abci.TxResult) []byte { - return secondaryKey(types.TxHeightKey, fmt.Sprintf("%d", result.Height), result.Height, result.Index) + return secondaryKey(events.TxHeightKey, fmt.Sprintf("%d", result.Height), result.Height, result.Index) } // Prefixes: these represent an initial part of the key and are used by iterators to iterate over a small diff --git a/state/indexer/tx/kv/kv_bench_test.go b/state/indexer/tx/kv/kv_bench_test.go index 3f4e63ee1..cd09fc69d 100644 --- a/state/indexer/tx/kv/kv_bench_test.go +++ b/state/indexer/tx/kv/kv_bench_test.go @@ -9,9 +9,9 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/mempool" ) func BenchmarkTxSearch(b *testing.B) { @@ -46,7 +46,7 @@ func BenchmarkTxSearch(b *testing.B) { txResult := &abci.TxResult{ Height: int64(i), Index: 0, - Tx: types.Tx(string(txBz)), + Tx: mempool.Tx(string(txBz)), Result: abci.ResponseDeliverTx{ Data: []byte{0}, Code: abci.CodeTypeOK, diff --git a/state/indexer/tx/kv/kv_test.go b/state/indexer/tx/kv/kv_test.go index dd63dd9a4..ded2e033b 100644 --- a/state/indexer/tx/kv/kv_test.go +++ b/state/indexer/tx/kv/kv_test.go @@ -13,17 +13,17 @@ import ( db "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/mempool" indexer "github.com/tendermint/tendermint/state/indexer" - "github.com/tendermint/tendermint/types" ) func TestTxIndex(t *testing.T) { txIndexer := NewTxIndex(db.NewMemDB()) - tx := 
types.Tx("HELLO WORLD") + tx := mempool.Tx("HELLO WORLD") txResult := &abci.TxResult{ Height: 1, Index: 0, @@ -46,7 +46,7 @@ func TestTxIndex(t *testing.T) { require.NoError(t, err) assert.True(t, proto.Equal(txResult, loadedTxResult)) - tx2 := types.Tx("BYE BYE WORLD") + tx2 := mempool.Tx("BYE BYE WORLD") txResult2 := &abci.TxResult{ Height: 1, Index: 0, @@ -74,7 +74,7 @@ func TestTxSearch(t *testing.T) { {Type: "account", Attributes: []abci.EventAttribute{{Key: "owner", Value: "Ivan", Index: true}}}, {Type: "", Attributes: []abci.EventAttribute{{Key: "not_allowed", Value: "Vlad", Index: true}}}, }) - hash := types.Tx(txResult.Tx).Hash() + hash := mempool.Tx(txResult.Tx).Hash() err := indexer.Index([]*abci.TxResult{txResult}) require.NoError(t, err) @@ -171,16 +171,16 @@ func TestTxSearchDeprecatedIndexing(t *testing.T) { txResult1 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, }) - hash1 := types.Tx(txResult1.Tx).Hash() + hash1 := mempool.Tx(txResult1.Tx).Hash() err := indexer.Index([]*abci.TxResult{txResult1}) require.NoError(t, err) // index tx also using deprecated indexing (event as key) txResult2 := txResultWithEvents(nil) - txResult2.Tx = types.Tx("HELLO WORLD 2") + txResult2.Tx = mempool.Tx("HELLO WORLD 2") - hash2 := types.Tx(txResult2.Tx).Hash() + hash2 := mempool.Tx(txResult2.Tx).Hash() b := indexer.store.NewBatch() rawBytes, err := proto.Marshal(txResult2) @@ -273,7 +273,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "1", Index: true}}}, }) - txResult.Tx = types.Tx("Bob's account") + txResult.Tx = mempool.Tx("Bob's account") txResult.Height = 2 txResult.Index = 1 err := indexer.Index([]*abci.TxResult{txResult}) @@ -283,7 +283,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult2 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "2", Index: true}}}, }) - txResult2.Tx = types.Tx("Alice's account") + txResult2.Tx = mempool.Tx("Alice's account") txResult2.Height = 1 txResult2.Index = 2 @@ -294,7 +294,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult3 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number", Value: "3", Index: true}}}, }) - txResult3.Tx = types.Tx("Jack's account") + txResult3.Tx = mempool.Tx("Jack's account") txResult3.Height = 1 txResult3.Index = 1 err = indexer.Index([]*abci.TxResult{txResult3}) @@ -305,7 +305,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { txResult4 := txResultWithEvents([]abci.Event{ {Type: "account", Attributes: []abci.EventAttribute{{Key: "number.id", Value: "1", Index: true}}}, }) - txResult4.Tx = types.Tx("Mike's account") + txResult4.Tx = mempool.Tx("Mike's account") txResult4.Height = 2 txResult4.Index = 2 err = indexer.Index([]*abci.TxResult{txResult4}) @@ -320,7 +320,7 @@ func TestTxSearchMultipleTxs(t *testing.T) { } func txResultWithEvents(events []abci.Event) *abci.TxResult { - tx := types.Tx("HELLO WORLD") + tx := mempool.Tx("HELLO WORLD") return &abci.TxResult{ Height: 1, Index: 0, diff --git a/state/indexer/tx/null/null.go b/state/indexer/tx/null/null.go index d92ed489e..a990b40ae 100644 --- a/state/indexer/tx/null/null.go +++ b/state/indexer/tx/null/null.go @@ -4,8 +4,8 @@ import ( "context" "errors" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/pubsub/query" + 
"github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/state/indexer" ) diff --git a/state/mocks/block_store.go b/state/mocks/block_store.go index e66aad071..4683aa252 100644 --- a/state/mocks/block_store.go +++ b/state/mocks/block_store.go @@ -3,9 +3,10 @@ package mocks import ( - mock "github.com/stretchr/testify/mock" + block "github.com/tendermint/tendermint/pkg/block" + metadata "github.com/tendermint/tendermint/pkg/metadata" - types "github.com/tendermint/tendermint/types" + mock "github.com/stretchr/testify/mock" ) // BlockStore is an autogenerated mock type for the BlockStore type @@ -42,15 +43,15 @@ func (_m *BlockStore) Height() int64 { } // LoadBaseMeta provides a mock function with given fields: -func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { +func (_m *BlockStore) LoadBaseMeta() *block.BlockMeta { ret := _m.Called() - var r0 *types.BlockMeta - if rf, ok := ret.Get(0).(func() *types.BlockMeta); ok { + var r0 *block.BlockMeta + if rf, ok := ret.Get(0).(func() *block.BlockMeta); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.BlockMeta) + r0 = ret.Get(0).(*block.BlockMeta) } } @@ -58,15 +59,15 @@ func (_m *BlockStore) LoadBaseMeta() *types.BlockMeta { } // LoadBlock provides a mock function with given fields: height -func (_m *BlockStore) LoadBlock(height int64) *types.Block { +func (_m *BlockStore) LoadBlock(height int64) *block.Block { ret := _m.Called(height) - var r0 *types.Block - if rf, ok := ret.Get(0).(func(int64) *types.Block); ok { + var r0 *block.Block + if rf, ok := ret.Get(0).(func(int64) *block.Block); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) + r0 = ret.Get(0).(*block.Block) } } @@ -74,15 +75,15 @@ func (_m *BlockStore) LoadBlock(height int64) *types.Block { } // LoadBlockByHash provides a mock function with given fields: hash -func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { +func (_m *BlockStore) LoadBlockByHash(hash []byte) *block.Block { ret := _m.Called(hash) - var r0 *types.Block - if rf, ok := ret.Get(0).(func([]byte) *types.Block); ok { + var r0 *block.Block + if rf, ok := ret.Get(0).(func([]byte) *block.Block); ok { r0 = rf(hash) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Block) + r0 = ret.Get(0).(*block.Block) } } @@ -90,15 +91,15 @@ func (_m *BlockStore) LoadBlockByHash(hash []byte) *types.Block { } // LoadBlockCommit provides a mock function with given fields: height -func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { +func (_m *BlockStore) LoadBlockCommit(height int64) *metadata.Commit { ret := _m.Called(height) - var r0 *types.Commit - if rf, ok := ret.Get(0).(func(int64) *types.Commit); ok { + var r0 *metadata.Commit + if rf, ok := ret.Get(0).(func(int64) *metadata.Commit); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Commit) + r0 = ret.Get(0).(*metadata.Commit) } } @@ -106,15 +107,15 @@ func (_m *BlockStore) LoadBlockCommit(height int64) *types.Commit { } // LoadBlockMeta provides a mock function with given fields: height -func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { +func (_m *BlockStore) LoadBlockMeta(height int64) *block.BlockMeta { ret := _m.Called(height) - var r0 *types.BlockMeta - if rf, ok := ret.Get(0).(func(int64) *types.BlockMeta); ok { + var r0 *block.BlockMeta + if rf, ok := ret.Get(0).(func(int64) *block.BlockMeta); ok { r0 = rf(height) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.BlockMeta) + r0 = 
ret.Get(0).(*block.BlockMeta) } } @@ -122,15 +123,15 @@ func (_m *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { } // LoadBlockPart provides a mock function with given fields: height, index -func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { +func (_m *BlockStore) LoadBlockPart(height int64, index int) *metadata.Part { ret := _m.Called(height, index) - var r0 *types.Part - if rf, ok := ret.Get(0).(func(int64, int) *types.Part); ok { + var r0 *metadata.Part + if rf, ok := ret.Get(0).(func(int64, int) *metadata.Part); ok { r0 = rf(height, index) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Part) + r0 = ret.Get(0).(*metadata.Part) } } @@ -138,15 +139,15 @@ func (_m *BlockStore) LoadBlockPart(height int64, index int) *types.Part { } // LoadSeenCommit provides a mock function with given fields: -func (_m *BlockStore) LoadSeenCommit() *types.Commit { +func (_m *BlockStore) LoadSeenCommit() *metadata.Commit { ret := _m.Called() - var r0 *types.Commit - if rf, ok := ret.Get(0).(func() *types.Commit); ok { + var r0 *metadata.Commit + if rf, ok := ret.Get(0).(func() *metadata.Commit); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.Commit) + r0 = ret.Get(0).(*metadata.Commit) } } @@ -174,9 +175,9 @@ func (_m *BlockStore) PruneBlocks(height int64) (uint64, error) { return r0, r1 } -// SaveBlock provides a mock function with given fields: block, blockParts, seenCommit -func (_m *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - _m.Called(block, blockParts, seenCommit) +// SaveBlock provides a mock function with given fields: _a0, blockParts, seenCommit +func (_m *BlockStore) SaveBlock(_a0 *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { + _m.Called(_a0, blockParts, seenCommit) } // Size provides a mock function with given fields: diff --git a/state/mocks/event_sink.go b/state/mocks/event_sink.go deleted file mode 100644 index 749515ccf..000000000 --- a/state/mocks/event_sink.go +++ /dev/null @@ -1,167 +0,0 @@ -// Code generated by mockery 2.7.5. DO NOT EDIT. 
- -package mocks - -import ( - context "context" - - mock "github.com/stretchr/testify/mock" - indexer "github.com/tendermint/tendermint/state/indexer" - - query "github.com/tendermint/tendermint/libs/pubsub/query" - - tenderminttypes "github.com/tendermint/tendermint/types" - - types "github.com/tendermint/tendermint/abci/types" -) - -// EventSink is an autogenerated mock type for the EventSink type -type EventSink struct { - mock.Mock -} - -// GetTxByHash provides a mock function with given fields: _a0 -func (_m *EventSink) GetTxByHash(_a0 []byte) (*types.TxResult, error) { - ret := _m.Called(_a0) - - var r0 *types.TxResult - if rf, ok := ret.Get(0).(func([]byte) *types.TxResult); ok { - r0 = rf(_a0) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.TxResult) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func([]byte) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// HasBlock provides a mock function with given fields: _a0 -func (_m *EventSink) HasBlock(_a0 int64) (bool, error) { - ret := _m.Called(_a0) - - var r0 bool - if rf, ok := ret.Get(0).(func(int64) bool); ok { - r0 = rf(_a0) - } else { - r0 = ret.Get(0).(bool) - } - - var r1 error - if rf, ok := ret.Get(1).(func(int64) error); ok { - r1 = rf(_a0) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// IndexBlockEvents provides a mock function with given fields: _a0 -func (_m *EventSink) IndexBlockEvents(_a0 tenderminttypes.EventDataNewBlockHeader) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func(tenderminttypes.EventDataNewBlockHeader) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// IndexTxEvents provides a mock function with given fields: _a0 -func (_m *EventSink) IndexTxEvents(_a0 []*types.TxResult) error { - ret := _m.Called(_a0) - - var r0 error - if rf, ok := ret.Get(0).(func([]*types.TxResult) error); ok { - r0 = rf(_a0) - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// SearchBlockEvents provides a mock function with given fields: _a0, _a1 -func (_m *EventSink) SearchBlockEvents(_a0 context.Context, _a1 *query.Query) ([]int64, error) { - ret := _m.Called(_a0, _a1) - - var r0 []int64 - if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []int64); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]int64) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// SearchTxEvents provides a mock function with given fields: _a0, _a1 -func (_m *EventSink) SearchTxEvents(_a0 context.Context, _a1 *query.Query) ([]*types.TxResult, error) { - ret := _m.Called(_a0, _a1) - - var r0 []*types.TxResult - if rf, ok := ret.Get(0).(func(context.Context, *query.Query) []*types.TxResult); ok { - r0 = rf(_a0, _a1) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).([]*types.TxResult) - } - } - - var r1 error - if rf, ok := ret.Get(1).(func(context.Context, *query.Query) error); ok { - r1 = rf(_a0, _a1) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// Stop provides a mock function with given fields: -func (_m *EventSink) Stop() error { - ret := _m.Called() - - var r0 error - if rf, ok := ret.Get(0).(func() error); ok { - r0 = rf() - } else { - r0 = ret.Error(0) - } - - return r0 -} - -// Type provides a mock function with given fields: -func (_m *EventSink) Type() indexer.EventSinkType { - ret := _m.Called() - - var r0 
indexer.EventSinkType - if rf, ok := ret.Get(0).(func() indexer.EventSinkType); ok { - r0 = rf() - } else { - r0 = ret.Get(0).(indexer.EventSinkType) - } - - return r0 -} diff --git a/state/mocks/evidence_pool.go b/state/mocks/evidence_pool.go index bb33547d2..125a856e4 100644 --- a/state/mocks/evidence_pool.go +++ b/state/mocks/evidence_pool.go @@ -4,9 +4,9 @@ package mocks import ( mock "github.com/stretchr/testify/mock" - state "github.com/tendermint/tendermint/state" + evidence "github.com/tendermint/tendermint/pkg/evidence" - types "github.com/tendermint/tendermint/types" + state "github.com/tendermint/tendermint/state" ) // EvidencePool is an autogenerated mock type for the EvidencePool type @@ -15,11 +15,11 @@ type EvidencePool struct { } // AddEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { +func (_m *EvidencePool) AddEvidence(_a0 evidence.Evidence) error { ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(types.Evidence) error); ok { + if rf, ok := ret.Get(0).(func(evidence.Evidence) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -29,11 +29,11 @@ func (_m *EvidencePool) AddEvidence(_a0 types.Evidence) error { } // CheckEvidence provides a mock function with given fields: _a0 -func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { +func (_m *EvidencePool) CheckEvidence(_a0 evidence.EvidenceList) error { ret := _m.Called(_a0) var r0 error - if rf, ok := ret.Get(0).(func(types.EvidenceList) error); ok { + if rf, ok := ret.Get(0).(func(evidence.EvidenceList) error); ok { r0 = rf(_a0) } else { r0 = ret.Error(0) @@ -43,15 +43,15 @@ func (_m *EvidencePool) CheckEvidence(_a0 types.EvidenceList) error { } // PendingEvidence provides a mock function with given fields: maxBytes -func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64) { +func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]evidence.Evidence, int64) { ret := _m.Called(maxBytes) - var r0 []types.Evidence - if rf, ok := ret.Get(0).(func(int64) []types.Evidence); ok { + var r0 []evidence.Evidence + if rf, ok := ret.Get(0).(func(int64) []evidence.Evidence); ok { r0 = rf(maxBytes) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]types.Evidence) + r0 = ret.Get(0).([]evidence.Evidence) } } @@ -66,6 +66,6 @@ func (_m *EvidencePool) PendingEvidence(maxBytes int64) ([]types.Evidence, int64 } // Update provides a mock function with given fields: _a0, _a1 -func (_m *EvidencePool) Update(_a0 state.State, _a1 types.EvidenceList) { +func (_m *EvidencePool) Update(_a0 state.State, _a1 evidence.EvidenceList) { _m.Called(_a0, _a1) } diff --git a/state/mocks/store.go b/state/mocks/store.go index 750bf7f29..6d5dc1fca 100644 --- a/state/mocks/store.go +++ b/state/mocks/store.go @@ -4,11 +4,11 @@ package mocks import ( mock "github.com/stretchr/testify/mock" + consensus "github.com/tendermint/tendermint/pkg/consensus" + state "github.com/tendermint/tendermint/state" tendermintstate "github.com/tendermint/tendermint/proto/tendermint/state" - - types "github.com/tendermint/tendermint/types" ) // Store is an autogenerated mock type for the Store type @@ -75,14 +75,14 @@ func (_m *Store) LoadABCIResponses(_a0 int64) (*tendermintstate.ABCIResponses, e } // LoadConsensusParams provides a mock function with given fields: _a0 -func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { +func (_m *Store) LoadConsensusParams(_a0 int64) (consensus.ConsensusParams, error) { ret := 
_m.Called(_a0) - var r0 types.ConsensusParams - if rf, ok := ret.Get(0).(func(int64) types.ConsensusParams); ok { + var r0 consensus.ConsensusParams + if rf, ok := ret.Get(0).(func(int64) consensus.ConsensusParams); ok { r0 = rf(_a0) } else { - r0 = ret.Get(0).(types.ConsensusParams) + r0 = ret.Get(0).(consensus.ConsensusParams) } var r1 error @@ -96,15 +96,15 @@ func (_m *Store) LoadConsensusParams(_a0 int64) (types.ConsensusParams, error) { } // LoadValidators provides a mock function with given fields: _a0 -func (_m *Store) LoadValidators(_a0 int64) (*types.ValidatorSet, error) { +func (_m *Store) LoadValidators(_a0 int64) (*consensus.ValidatorSet, error) { ret := _m.Called(_a0) - var r0 *types.ValidatorSet - if rf, ok := ret.Get(0).(func(int64) *types.ValidatorSet); ok { + var r0 *consensus.ValidatorSet + if rf, ok := ret.Get(0).(func(int64) *consensus.ValidatorSet); ok { r0 = rf(_a0) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*types.ValidatorSet) + r0 = ret.Get(0).(*consensus.ValidatorSet) } } @@ -161,11 +161,11 @@ func (_m *Store) SaveABCIResponses(_a0 int64, _a1 *tendermintstate.ABCIResponses } // SaveValidatorSets provides a mock function with given fields: _a0, _a1, _a2 -func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *types.ValidatorSet) error { +func (_m *Store) SaveValidatorSets(_a0 int64, _a1 int64, _a2 *consensus.ValidatorSet) error { ret := _m.Called(_a0, _a1, _a2) var r0 error - if rf, ok := ret.Get(0).(func(int64, int64, *types.ValidatorSet) error); ok { + if rf, ok := ret.Get(0).(func(int64, int64, *consensus.ValidatorSet) error); ok { r0 = rf(_a0, _a1, _a2) } else { r0 = ret.Error(0) diff --git a/state/services.go b/state/services.go index c692d0b94..e6c94492a 100644 --- a/state/services.go +++ b/state/services.go @@ -1,7 +1,10 @@ package state import ( - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" ) //------------------------------------------------------ @@ -20,19 +23,19 @@ type BlockStore interface { Height() int64 Size() int64 - LoadBaseMeta() *types.BlockMeta - LoadBlockMeta(height int64) *types.BlockMeta - LoadBlock(height int64) *types.Block + LoadBaseMeta() *block.BlockMeta + LoadBlockMeta(height int64) *block.BlockMeta + LoadBlock(height int64) *block.Block - SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + SaveBlock(block *block.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) PruneBlocks(height int64) (uint64, error) - LoadBlockByHash(hash []byte) *types.Block - LoadBlockPart(height int64, index int) *types.Part + LoadBlockByHash(hash []byte) *block.Block + LoadBlockPart(height int64, index int) *metadata.Part - LoadBlockCommit(height int64) *types.Commit - LoadSeenCommit() *types.Commit + LoadBlockCommit(height int64) *metadata.Commit + LoadSeenCommit() *metadata.Commit } //----------------------------------------------------------------------------- @@ -42,20 +45,20 @@ type BlockStore interface { // EvidencePool defines the EvidencePool interface used by State. 
type EvidencePool interface { - PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) - AddEvidence(types.Evidence) error - Update(State, types.EvidenceList) - CheckEvidence(types.EvidenceList) error + PendingEvidence(maxBytes int64) (ev []evidence.Evidence, size int64) + AddEvidence(evidence.Evidence) error + Update(State, evidence.EvidenceList) + CheckEvidence(evidence.EvidenceList) error } // EmptyEvidencePool is an empty implementation of EvidencePool, useful for testing. It also complies // to the consensus evidence pool interface type EmptyEvidencePool struct{} -func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []types.Evidence, size int64) { +func (EmptyEvidencePool) PendingEvidence(maxBytes int64) (ev []evidence.Evidence, size int64) { return nil, 0 } -func (EmptyEvidencePool) AddEvidence(types.Evidence) error { return nil } -func (EmptyEvidencePool) Update(State, types.EvidenceList) {} -func (EmptyEvidencePool) CheckEvidence(evList types.EvidenceList) error { return nil } -func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *types.Vote) {} +func (EmptyEvidencePool) AddEvidence(evidence.Evidence) error { return nil } +func (EmptyEvidencePool) Update(State, evidence.EvidenceList) {} +func (EmptyEvidencePool) CheckEvidence(evList evidence.EvidenceList) error { return nil } +func (EmptyEvidencePool) ReportConflictingVotes(voteA, voteB *consensus.Vote) {} diff --git a/state/state.go b/state/state.go index 132a86fda..42b77860d 100644 --- a/state/state.go +++ b/state/state.go @@ -10,9 +10,13 @@ import ( "github.com/gogo/protobuf/proto" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmversion "github.com/tendermint/tendermint/proto/tendermint/version" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -73,7 +77,7 @@ type State struct { // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) LastBlockHeight int64 - LastBlockID types.BlockID + LastBlockID metadata.BlockID LastBlockTime time.Time // LastValidators is used to validate block.LastCommit. @@ -82,14 +86,14 @@ type State struct { // Note that if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 + 1 // Extra +1 due to nextValSet delay. - NextValidators *types.ValidatorSet - Validators *types.ValidatorSet - LastValidators *types.ValidatorSet + NextValidators *consensus.ValidatorSet + Validators *consensus.ValidatorSet + LastValidators *consensus.ValidatorSet LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. // Changes returned by EndBlock and updated after Commit. 
- ConsensusParams types.ConsensusParams + ConsensusParams consensus.ConsensusParams LastHeightConsensusParamsChanged int64 // Merkle root of the results from executing prev block @@ -206,7 +210,7 @@ func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint state.ChainID = pb.ChainID state.InitialHeight = pb.InitialHeight - bi, err := types.BlockIDFromProto(&pb.LastBlockID) + bi, err := metadata.BlockIDFromProto(&pb.LastBlockID) if err != nil { return nil, err } @@ -214,30 +218,30 @@ func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint state.LastBlockHeight = pb.LastBlockHeight state.LastBlockTime = pb.LastBlockTime - vals, err := types.ValidatorSetFromProto(pb.Validators) + vals, err := consensus.ValidatorSetFromProto(pb.Validators) if err != nil { return nil, err } state.Validators = vals - nVals, err := types.ValidatorSetFromProto(pb.NextValidators) + nVals, err := consensus.ValidatorSetFromProto(pb.NextValidators) if err != nil { return nil, err } state.NextValidators = nVals if state.LastBlockHeight >= 1 { // At Block 1 LastValidators is nil - lVals, err := types.ValidatorSetFromProto(pb.LastValidators) + lVals, err := consensus.ValidatorSetFromProto(pb.LastValidators) if err != nil { return nil, err } state.LastValidators = lVals } else { - state.LastValidators = types.NewValidatorSet(nil) + state.LastValidators = consensus.NewValidatorSet(nil) } state.LastHeightValidatorsChanged = pb.LastHeightValidatorsChanged - state.ConsensusParams = types.ConsensusParamsFromProto(pb.ConsensusParams) + state.ConsensusParams = consensus.ConsensusParamsFromProto(pb.ConsensusParams) state.LastHeightConsensusParamsChanged = pb.LastHeightConsensusParamsChanged state.LastResultsHash = pb.LastResultsHash state.AppHash = pb.AppHash @@ -253,14 +257,14 @@ func StateFromProto(pb *tmstate.State) (*State, error) { //nolint:golint // track rounds, and hence does not know the correct proposer. TODO: fix this! func (state State) MakeBlock( height int64, - txs []types.Tx, - commit *types.Commit, - evidence []types.Evidence, + txs []mempool.Tx, + commit *metadata.Commit, + evidence []evidence.Evidence, proposerAddress []byte, -) (*types.Block, *types.PartSet) { +) (*block.Block, *metadata.PartSet) { // Build base block with block data. - block := types.MakeBlock(height, txs, commit, evidence) + block := block.MakeBlock(height, txs, commit, evidence) // Set time. var timestamp time.Time @@ -279,14 +283,14 @@ func (state State) MakeBlock( proposerAddress, ) - return block, block.MakePartSet(types.BlockPartSizeBytes) + return block, block.MakePartSet(metadata.BlockPartSizeBytes) } // MedianTime computes a median time for a given Commit (based on Timestamp field of votes messages) and the // corresponding validator set. The computed time is always between timestamps of // the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the // computed value. -func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { +func MedianTime(commit *metadata.Commit, validators *consensus.ValidatorSet) time.Time { weightedTimes := make([]*tmtime.WeightedTime, len(commit.Signatures)) totalVotingPower := int64(0) @@ -321,36 +325,36 @@ func MakeGenesisStateFromFile(genDocFile string) (State, error) { } // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
-func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { +func MakeGenesisDocFromFile(genDocFile string) (*consensus.GenesisDoc, error) { genDocJSON, err := ioutil.ReadFile(genDocFile) if err != nil { return nil, fmt.Errorf("couldn't read GenesisDoc file: %v", err) } - genDoc, err := types.GenesisDocFromJSON(genDocJSON) + genDoc, err := consensus.GenesisDocFromJSON(genDocJSON) if err != nil { return nil, fmt.Errorf("error reading GenesisDoc: %v", err) } return genDoc, nil } -// MakeGenesisState creates state from types.GenesisDoc. -func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { +// MakeGenesisState creates state from consensus.GenesisDoc. +func MakeGenesisState(genDoc *consensus.GenesisDoc) (State, error) { err := genDoc.ValidateAndComplete() if err != nil { return State{}, fmt.Errorf("error in genesis doc: %w", err) } - var validatorSet, nextValidatorSet *types.ValidatorSet + var validatorSet, nextValidatorSet *consensus.ValidatorSet if genDoc.Validators == nil || len(genDoc.Validators) == 0 { - validatorSet = types.NewValidatorSet(nil) - nextValidatorSet = types.NewValidatorSet(nil) + validatorSet = consensus.NewValidatorSet(nil) + nextValidatorSet = consensus.NewValidatorSet(nil) } else { - validators := make([]*types.Validator, len(genDoc.Validators)) + validators := make([]*consensus.Validator, len(genDoc.Validators)) for i, val := range genDoc.Validators { - validators[i] = types.NewValidator(val.PubKey, val.Power) + validators[i] = consensus.NewValidator(val.PubKey, val.Power) } - validatorSet = types.NewValidatorSet(validators) - nextValidatorSet = types.NewValidatorSet(validators).CopyIncrementProposerPriority(1) + validatorSet = consensus.NewValidatorSet(validators) + nextValidatorSet = consensus.NewValidatorSet(validators).CopyIncrementProposerPriority(1) } return State{ @@ -359,12 +363,12 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { InitialHeight: genDoc.InitialHeight, LastBlockHeight: 0, - LastBlockID: types.BlockID{}, + LastBlockID: metadata.BlockID{}, LastBlockTime: genDoc.GenesisTime, NextValidators: nextValidatorSet, Validators: validatorSet, - LastValidators: types.NewValidatorSet(nil), + LastValidators: consensus.NewValidatorSet(nil), LastHeightValidatorsChanged: genDoc.InitialHeight, ConsensusParams: *genDoc.ConsensusParams, diff --git a/state/state_test.go b/state/state_test.go index 99d45bb62..9ad05fcbd 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -15,14 +15,15 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" cryptoenc "github.com/tendermint/tendermint/crypto/encoding" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" sm "github.com/tendermint/tendermint/state" sf "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases. @@ -66,7 +67,7 @@ func TestStateCopy(t *testing.T) { // TestMakeGenesisStateNilValidators tests state's consistency when genesis file's validators field is nil. 
func TestMakeGenesisStateNilValidators(t *testing.T) { - doc := types.GenesisDoc{ + doc := consensus.GenesisDoc{ ChainID: "dummy", Validators: nil, } @@ -106,7 +107,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { state.LastBlockHeight++ // Build mock responses. - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := sf.MakeBlock(state, 2, new(metadata.Commit)) abciResponses := new(tmstate.ABCIResponses) dtxs := make([]*abci.ResponseDeliverTx, 2) @@ -267,7 +268,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { _, val := state.Validators.GetByIndex(0) power := val.VotingPower var err error - var validatorUpdates []*types.Validator + var validatorUpdates []*consensus.Validator for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next pubkey. if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { @@ -275,7 +276,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { power++ } header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, power) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) @@ -362,43 +363,43 @@ func TestProposerFrequency(t *testing.T) { nTestCases := 5 for i := 0; i < nTestCases; i++ { N := mrand.Int()%maxVals + 1 - vals := make([]*types.Validator, N) + vals := make([]*consensus.Validator, N) totalVotePower := int64(0) for j := 0; j < N; j++ { // make sure votePower > 0 votePower := int64(mrand.Int()%maxPower) + 1 totalVotePower += votePower - privVal := types.NewMockPV() + privVal := consensus.NewMockPV() pubKey, err := privVal.GetPubKey(context.Background()) require.NoError(t, err) - val := types.NewValidator(pubKey, votePower) + val := consensus.NewValidator(pubKey, votePower) val.ProposerPriority = mrand.Int63() vals[j] = val } - valSet := types.NewValidatorSet(vals) + valSet := consensus.NewValidatorSet(vals) valSet.RescalePriorities(totalVotePower) testProposerFreq(t, i, valSet) } } // new val set with given powers and random initial priorities -func genValSetWithPowers(powers []int64) *types.ValidatorSet { +func genValSetWithPowers(powers []int64) *consensus.ValidatorSet { size := len(powers) - vals := make([]*types.Validator, size) + vals := make([]*consensus.Validator, size) totalVotePower := int64(0) for i := 0; i < size; i++ { totalVotePower += powers[i] - val := types.NewValidator(ed25519.GenPrivKey().PubKey(), powers[i]) + val := consensus.NewValidator(ed25519.GenPrivKey().PubKey(), powers[i]) val.ProposerPriority = mrand.Int63() vals[i] = val } - valSet := types.NewValidatorSet(vals) + valSet := consensus.NewValidatorSet(vals) valSet.RescalePriorities(totalVotePower) return valSet } // test a proposer appears as frequently as expected -func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { +func testProposerFreq(t *testing.T, caseNum int, valSet *consensus.ValidatorSet) { N := valSet.Size() totalPower := valSet.TotalVotingPower() @@ -440,21 +441,21 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { defer tearDown(t) val1VotingPower := int64(10) val1PubKey := ed25519.GenPrivKey().PubKey() - val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower} + val1 := &consensus.Validator{Address: val1PubKey.Address(), PubKey: 
val1PubKey, VotingPower: val1VotingPower} - state.Validators = types.NewValidatorSet([]*types.Validator{val1}) + state.Validators = consensus.NewValidatorSet([]*consensus.Validator{val1}) state.NextValidators = state.Validators // NewValidatorSet calls IncrementProposerPriority but uses on a copy of val1 assert.EqualValues(t, 0, val1.ProposerPriority) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) @@ -469,7 +470,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val2VotingPower} - validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) @@ -505,7 +506,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // 1. 
Add - Val2 VotingPower change to 1 => updatedVotingPowVal2 := int64(1) updateVal := abci.ValidatorUpdate{PubKey: fvp, Power: updatedVotingPowVal2} - validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) assert.NoError(t, err) // this will cause the diff of priorities (77) @@ -554,22 +555,22 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { defer tearDown(t) val1VotingPower := int64(10) val1PubKey := ed25519.GenPrivKey().PubKey() - val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower} + val1 := &consensus.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower} // reset state validators to above validator - state.Validators = types.NewValidatorSet([]*types.Validator{val1}) + state.Validators = consensus.NewValidatorSet([]*consensus.Validator{val1}) state.NextValidators = state.Validators // we only have one validator: assert.Equal(t, val1PubKey.Address(), state.Validators.Proposer.Address) - block := sf.MakeBlock(state, state.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(state, state.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} // no updates: abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) @@ -586,7 +587,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { fvp, err := cryptoenc.PubKeyToProto(val2PubKey) require.NoError(t, err) updateAddVal := abci.ValidatorUpdate{PubKey: fvp, Power: val1VotingPower} - validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -629,7 +630,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { updatedVal2, ) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) @@ -672,7 +673,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) oldState, err = sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -688,7 +689,7 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { BeginBlock: 
&abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -724,16 +725,16 @@ func TestLargeGenesisValidator(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - genesisVotingPower := types.MaxTotalVotingPower / 1000 + genesisVotingPower := consensus.MaxTotalVotingPower / 1000 genesisPubKey := ed25519.GenPrivKey().PubKey() // fmt.Println("genesis addr: ", genesisPubKey.Address()) - genesisVal := &types.Validator{ + genesisVal := &consensus.Validator{ Address: genesisPubKey.Address(), PubKey: genesisPubKey, VotingPower: genesisVotingPower, } // reset state validators to above validator - state.Validators = types.NewValidatorSet([]*types.Validator{genesisVal}) + state.Validators = consensus.NewValidatorSet([]*consensus.Validator{genesisVal}) state.NextValidators = state.Validators require.True(t, len(state.Validators.Validators) == 1) @@ -746,11 +747,11 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -772,14 +773,14 @@ func TestLargeGenesisValidator(t *testing.T) { fvp, err := cryptoenc.PubKeyToProto(firstAddedValPubKey) require.NoError(t, err) firstAddedVal := abci.ValidatorUpdate{PubKey: fvp, Power: firstAddedValVotingPower} - validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{firstAddedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -790,11 +791,11 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := 
consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(lastState, lastState.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedStateInner, err := sm.UpdateState(lastState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -819,15 +820,15 @@ func TestLargeGenesisValidator(t *testing.T) { ap, err := cryptoenc.PubKeyToProto(addedPubKey) require.NoError(t, err) addedVal := abci.ValidatorUpdate{PubKey: ap, Power: firstAddedValVotingPower} - validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) assert.NoError(t, err) abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{addedVal}}, } - block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} state, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) } @@ -841,9 +842,9 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: []abci.ValidatorUpdate{removeGenesisVal}}, } - block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(types.Commit)) - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + block = sf.MakeBlock(oldState, oldState.LastBlockHeight+1, new(metadata.Commit)) + blockID = metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) updatedState, err = sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -860,10 +861,10 @@ func TestLargeGenesisValidator(t *testing.T) { BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err = types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(types.Commit)) - blockID = types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block = sf.MakeBlock(curState, curState.LastBlockHeight+1, new(metadata.Commit)) + blockID = metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) if !bytes.Equal(curState.Validators.Proposer.Address, 
curState.NextValidators.Proposer.Address) { @@ -877,18 +878,18 @@ func TestLargeGenesisValidator(t *testing.T) { assert.Equal(t, firstProposerChangeExpectedAfter, count) // store proposers here to see if we see them again in the same order: numVals := len(updatedState.Validators.Validators) - proposers := make([]*types.Validator, numVals) + proposers := make([]*consensus.Validator, numVals) for i := 0; i < 100; i++ { // no updates: abciResponses := &tmstate.ABCIResponses{ BeginBlock: &abci.ResponseBeginBlock{}, EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, } - validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) + validatorUpdates, err := consensus.PB2TM.ValidatorUpdates(abciResponses.EndBlock.ValidatorUpdates) require.NoError(t, err) - block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(types.Commit)) - blockID := types.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} + block := sf.MakeBlock(updatedState, updatedState.LastBlockHeight+1, new(metadata.Commit)) + blockID := metadata.BlockID{Hash: block.Hash(), PartSetHeader: block.MakePartSet(testPartSize).Header()} updatedState, err = sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) @@ -946,8 +947,8 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, pubkey) // Save state etc. - var validatorUpdates []*types.Validator - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + var validatorUpdates []*consensus.Validator + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) @@ -982,7 +983,7 @@ func TestStateMakeBlock(t *testing.T) { proposerAddress := state.Validators.GetProposer().Address stateVersion := state.Version.Consensus - block := sf.MakeBlock(state, 2, new(types.Commit)) + block := sf.MakeBlock(state, 2, new(metadata.Commit)) // test we set some fields assert.Equal(t, stateVersion, block.Version) @@ -1003,10 +1004,10 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { // Each valset is just one validator. // create list of them. - params := make([]types.ConsensusParams, N+1) + params := make([]consensus.ConsensusParams, N+1) params[0] = state.ConsensusParams for i := 1; i < N+1; i++ { - params[i] = *types.DefaultConsensusParams() + params[i] = *consensus.DefaultConsensusParams() params[i].Block.MaxBytes += int64(i) } @@ -1016,7 +1017,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex := 0 cp := params[changeIndex] var err error - var validatorUpdates []*types.Validator + var validatorUpdates []*consensus.Validator for i := int64(1); i < highestHeight; i++ { // When we get to a change height, use the next params. 
if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { @@ -1024,7 +1025,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { cp = params[changeIndex] } header, blockID, responses := makeHeaderPartsResponsesParams(state, &cp) - validatorUpdates, err = types.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) + validatorUpdates, err = consensus.PB2TM.ValidatorUpdates(responses.EndBlock.ValidatorUpdates) require.NoError(t, err) state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) diff --git a/state/store.go b/state/store.go index 5ce11e47d..3fae44549 100644 --- a/state/store.go +++ b/state/store.go @@ -9,12 +9,12 @@ import ( "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) const ( @@ -78,17 +78,17 @@ type Store interface { // Load loads the current state of the blockchain Load() (State, error) // LoadValidators loads the validator set at a given height - LoadValidators(int64) (*types.ValidatorSet, error) + LoadValidators(int64) (*consensus.ValidatorSet, error) // LoadABCIResponses loads the abciResponse for a given height LoadABCIResponses(int64) (*tmstate.ABCIResponses, error) // LoadConsensusParams loads the consensus params for a given height - LoadConsensusParams(int64) (types.ConsensusParams, error) + LoadConsensusParams(int64) (consensus.ConsensusParams, error) // Save overwrites the previous state with the updated one Save(State) error // SaveABCIResponses saves ABCIResponses for a given height SaveABCIResponses(int64, *tmstate.ABCIResponses) error // SaveValidatorSet saves the validator set at a given height - SaveValidatorSets(int64, int64, *types.ValidatorSet) error + SaveValidatorSets(int64, int64, *consensus.ValidatorSet) error // Bootstrap is used for bootstrapping state when not starting from a initial height. Bootstrap(State) error // PruneStates takes the height from which to prune up to (exclusive) @@ -401,7 +401,7 @@ func (store dbStore) reverseBatchDelete(batch dbm.Batch, start, end []byte) ([]b // // See merkle.SimpleHashFromByteSlices func ABCIResponsesResultsHash(ar *tmstate.ABCIResponses) []byte { - return types.NewResults(ar.DeliverTxs).Hash() + return abci.NewResults(ar.DeliverTxs).Hash() } // LoadABCIResponses loads the ABCIResponses for the given height from the @@ -465,7 +465,7 @@ func (store dbStore) saveABCIResponses(height int64, abciResponses *tmstate.ABCI // It is exposed so that a backfill operation during state sync can populate // the store with the necessary amount of validator sets to verify any evidence // it may encounter. -func (store dbStore) SaveValidatorSets(lowerHeight, upperHeight int64, vals *types.ValidatorSet) error { +func (store dbStore) SaveValidatorSets(lowerHeight, upperHeight int64, vals *consensus.ValidatorSet) error { batch := store.db.NewBatch() defer batch.Close() @@ -483,7 +483,7 @@ func (store dbStore) SaveValidatorSets(lowerHeight, upperHeight int64, vals *typ // LoadValidators loads the ValidatorSet for a given height. // Returns ErrNoValSetForHeight if the validator set can't be found for this height. 
-func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { +func (store dbStore) LoadValidators(height int64) (*consensus.ValidatorSet, error) { valInfo, err := loadValidatorsInfo(store.db, height) if err != nil { @@ -501,7 +501,7 @@ func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { ) } - vs, err := types.ValidatorSetFromProto(valInfo2.ValidatorSet) + vs, err := consensus.ValidatorSetFromProto(valInfo2.ValidatorSet) if err != nil { return nil, err } @@ -516,7 +516,7 @@ func (store dbStore) LoadValidators(height int64) (*types.ValidatorSet, error) { valInfo = valInfo2 } - vip, err := types.ValidatorSetFromProto(valInfo.ValidatorSet) + vip, err := consensus.ValidatorSetFromProto(valInfo.ValidatorSet) if err != nil { return nil, err } @@ -559,7 +559,7 @@ func loadValidatorsInfo(db dbm.DB, height int64) (*tmstate.ValidatorsInfo, error // persisted. func (store dbStore) saveValidatorsInfo( height, lastHeightChanged int64, - valSet *types.ValidatorSet, + valSet *consensus.ValidatorSet, batch dbm.Batch, ) error { if lastHeightChanged > height { @@ -592,12 +592,12 @@ func (store dbStore) saveValidatorsInfo( // Allocate empty Consensus params at compile time to avoid multiple allocations during runtime var ( - empty = types.ConsensusParams{} + empty = consensus.ConsensusParams{} emptypb = tmproto.ConsensusParams{} ) // LoadConsensusParams loads the ConsensusParams for a given height. -func (store dbStore) LoadConsensusParams(height int64) (types.ConsensusParams, error) { +func (store dbStore) LoadConsensusParams(height int64) (consensus.ConsensusParams, error) { paramsInfo, err := store.loadConsensusParamsInfo(height) if err != nil { return empty, fmt.Errorf("could not find consensus params for height #%d: %w", height, err) @@ -617,7 +617,7 @@ func (store dbStore) LoadConsensusParams(height int64) (types.ConsensusParams, e paramsInfo = paramsInfo2 } - return types.ConsensusParamsFromProto(paramsInfo.ConsensusParams), nil + return consensus.ConsensusParamsFromProto(paramsInfo.ConsensusParams), nil } func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusParamsInfo, error) { @@ -646,7 +646,7 @@ func (store dbStore) loadConsensusParamsInfo(height int64) (*tmstate.ConsensusPa // only the last height for which they changed is persisted. 
func (store dbStore) saveConsensusParamsInfo( nextHeight, changeHeight int64, - params types.ConsensusParams, + params consensus.ConsensusParams, batch dbm.Batch, ) error { paramsInfo := &tmstate.ConsensusParamsInfo{ diff --git a/state/store_test.go b/state/store_test.go index 5d32040b5..d439f0251 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -10,15 +10,15 @@ import ( dbm "github.com/tendermint/tm-db" - abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/test/factory" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/abci" + "github.com/tendermint/tendermint/pkg/consensus" tmstate "github.com/tendermint/tendermint/proto/tendermint/state" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) const ( @@ -32,7 +32,7 @@ func TestStoreBootstrap(t *testing.T) { val, _ := factory.RandValidator(true, 10) val2, _ := factory.RandValidator(true, 10) val3, _ := factory.RandValidator(true, 10) - vals := types.NewValidatorSet([]*types.Validator{val, val2, val3}) + vals := consensus.NewValidatorSet([]*consensus.Validator{val, val2, val3}) bootstrapState := makeRandomStateFromValidatorSet(vals, 100, 100) err := stateStore.Bootstrap(bootstrapState) require.NoError(t, err) @@ -58,7 +58,7 @@ func TestStoreLoadValidators(t *testing.T) { val, _ := factory.RandValidator(true, 10) val2, _ := factory.RandValidator(true, 10) val3, _ := factory.RandValidator(true, 10) - vals := types.NewValidatorSet([]*types.Validator{val, val2, val3}) + vals := consensus.NewValidatorSet([]*consensus.Validator{val, val2, val3}) // 1) LoadValidators loads validators using a height where they were last changed // Note that only the next validators at height h + 1 are saved @@ -142,15 +142,15 @@ func BenchmarkLoadValidators(b *testing.B) { func TestStoreLoadConsensusParams(t *testing.T) { stateDB := dbm.NewMemDB() stateStore := sm.NewStore(stateDB) - err := stateStore.Save(makeRandomStateFromConsensusParams(types.DefaultConsensusParams(), 1, 1)) + err := stateStore.Save(makeRandomStateFromConsensusParams(consensus.DefaultConsensusParams(), 1, 1)) require.NoError(t, err) params, err := stateStore.LoadConsensusParams(1) require.NoError(t, err) - require.Equal(t, types.DefaultConsensusParams(), &params) + require.Equal(t, consensus.DefaultConsensusParams(), &params) // we give the state store different params but say that the height hasn't changed, hence // it should save a pointer to the params at height 1 - differentParams := types.DefaultConsensusParams() + differentParams := consensus.DefaultConsensusParams() differentParams.Block.MaxBytes = 20000 err = stateStore.Save(makeRandomStateFromConsensusParams(differentParams, 10, 1)) require.NoError(t, err) @@ -189,9 +189,9 @@ func TestPruneStates(t *testing.T) { // Generate a bunch of state data. Validators change for heights ending with 3, and // parameters when ending with 5. 
- validator := &types.Validator{Address: tmrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} - validatorSet := &types.ValidatorSet{ - Validators: []*types.Validator{validator}, + validator := &consensus.Validator{Address: tmrand.Bytes(crypto.AddressSize), VotingPower: 100, PubKey: pk} + validatorSet := &consensus.ValidatorSet{ + Validators: []*consensus.Validator{validator}, Proposer: validator, } valsChanged := int64(0) @@ -210,8 +210,8 @@ func TestPruneStates(t *testing.T) { LastBlockHeight: h - 1, Validators: validatorSet, NextValidators: validatorSet, - ConsensusParams: types.ConsensusParams{ - Block: types.BlockParams{MaxBytes: 10e6}, + ConsensusParams: consensus.ConsensusParams{ + Block: consensus.BlockParams{MaxBytes: 10e6}, }, LastHeightValidatorsChanged: valsChanged, LastHeightConsensusParamsChanged: paramsChanged, @@ -256,7 +256,7 @@ func TestPruneStates(t *testing.T) { require.NotNil(t, abci, h) } - emptyParams := types.ConsensusParams{} + emptyParams := consensus.ConsensusParams{} for h := tc.startHeight; h < tc.pruneHeight; h++ { vals, err := stateStore.LoadValidators(h) @@ -297,7 +297,7 @@ func TestABCIResponsesResultsHash(t *testing.T) { root := sm.ABCIResponsesResultsHash(responses) // root should be Merkle tree root of DeliverTxs responses - results := types.NewResults(responses.DeliverTxs) + results := abci.NewResults(responses.DeliverTxs) assert.Equal(t, root, results.Hash()) // test we can prove first DeliverTx diff --git a/state/test/factory/block.go b/state/test/factory/block.go index b4eb83fa7..fe585ceaa 100644 --- a/state/test/factory/block.go +++ b/state/test/factory/block.go @@ -4,11 +4,14 @@ import ( "time" "github.com/tendermint/tendermint/internal/test/factory" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/mempool" + "github.com/tendermint/tendermint/pkg/metadata" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) -func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Block { +func MakeBlocks(n int, state *sm.State, privVal consensus.PrivValidator) []*types.Block { blocks := make([]*types.Block, 0) var ( @@ -35,7 +38,7 @@ func MakeBlocks(n int, state *sm.State, privVal types.PrivValidator) []*types.Bl return blocks } -func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { +func MakeBlock(state sm.State, height int64, c *metadata.Commit) *types.Block { block, _ := state.MakeBlock( height, factory.MakeTenTxs(state.LastBlockHeight), @@ -47,9 +50,9 @@ func MakeBlock(state sm.State, height int64, c *types.Commit) *types.Block { } func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta *types.BlockMeta, - privVal types.PrivValidator, height int64) (*types.Block, *types.PartSet) { + privVal consensus.PrivValidator, height int64) (*types.Block, *metadata.PartSet) { - lastCommit := types.NewCommit(height-1, 0, types.BlockID{}, nil) + lastCommit := metadata.NewCommit(height-1, 0, metadata.BlockID{}, nil) if height > 1 { vote, _ := factory.MakeVote( privVal, @@ -57,9 +60,9 @@ func makeBlockAndPartSet(state sm.State, lastBlock *types.Block, lastBlockMeta * 1, lastBlock.Header.Height, 0, 2, lastBlockMeta.BlockID, time.Now()) - lastCommit = types.NewCommit(vote.Height, vote.Round, - lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) + lastCommit = metadata.NewCommit(vote.Height, vote.Round, + lastBlockMeta.BlockID, 
[]metadata.CommitSig{vote.CommitSig()}) } - return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) + return state.MakeBlock(height, []mempool.Tx{}, lastCommit, nil, state.Validators.GetProposer().Address) } diff --git a/state/tx_filter.go b/state/tx_filter.go index 61340e135..d63c849c0 100644 --- a/state/tx_filter.go +++ b/state/tx_filter.go @@ -2,13 +2,13 @@ package state import ( mempl "github.com/tendermint/tendermint/internal/mempool" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" ) // TxPreCheck returns a function to filter transactions before processing. // The function limits the size of a transaction to the block's maximum data size. func TxPreCheck(state State) mempl.PreCheckFunc { - maxDataBytes := types.MaxDataBytesNoEvidence( + maxDataBytes := block.MaxDataBytesNoEvidence( state.ConsensusParams.Block.MaxBytes, state.Validators.Size(), ) diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index d6236fcbf..4d3c6e8e7 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -7,8 +7,8 @@ import ( "github.com/stretchr/testify/require" tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/pkg/mempool" sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" ) func TestTxFilter(t *testing.T) { @@ -19,12 +19,12 @@ func TestTxFilter(t *testing.T) { // Max size of Txs is much smaller than size of block, // since we need to account for commits and evidence. testCases := []struct { - tx types.Tx + tx mempool.Tx isErr bool }{ - {types.Tx(tmrand.Bytes(2155)), false}, - {types.Tx(tmrand.Bytes(2156)), true}, - {types.Tx(tmrand.Bytes(3000)), true}, + {mempool.Tx(tmrand.Bytes(2155)), false}, + {mempool.Tx(tmrand.Bytes(2156)), true}, + {mempool.Tx(tmrand.Bytes(3000)), true}, } for i, tc := range testCases { diff --git a/state/validation.go b/state/validation.go index fbd285f8a..1da9c2cef 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,13 +5,14 @@ import ( "errors" "fmt" - "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/evidence" ) //----------------------------------------------------- // Validate block -func validateBlock(state State, block *types.Block) error { +func validateBlock(state State, block *block.Block) error { // Validate internal consistency. if err := block.ValidateBasic(); err != nil { return err @@ -138,7 +139,7 @@ func validateBlock(state State, block *types.Block) error { // Check evidence doesn't exceed the limit amount of bytes. 
if max, got := state.ConsensusParams.Evidence.MaxBytes, block.Evidence.ByteSize(); got > max { - return types.NewErrEvidenceOverflow(max, got) + return evidence.NewErrEvidenceOverflow(max, got) } return nil diff --git a/state/validation_test.go b/state/validation_test.go index 151f2be61..be74264b9 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -9,19 +9,22 @@ import ( "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" memmock "github.com/tendermint/tendermint/internal/mempool/mock" "github.com/tendermint/tendermint/internal/test/factory" "github.com/tendermint/tendermint/libs/log" tmtime "github.com/tendermint/tendermint/libs/time" + "github.com/tendermint/tendermint/pkg/abci" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/mocks" sf "github.com/tendermint/tendermint/state/test/factory" "github.com/tendermint/tendermint/store" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" ) @@ -43,7 +46,7 @@ func TestValidateBlockHeader(t *testing.T) { sm.EmptyEvidencePool{}, blockStore, ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := metadata.NewCommit(0, 0, metadata.BlockID{}, nil) // some bad values wrongHash := tmhash.Sum([]byte("this hash is wrong")) @@ -79,7 +82,7 @@ func TestValidateBlockHeader(t *testing.T) { {"Proposer invalid", func(block *types.Block) { block.ProposerAddress = []byte("wrong size") }}, {"first LastCommit contains signatures", func(block *types.Block) { - block.LastCommit = types.NewCommit(0, 0, types.BlockID{}, []types.CommitSig{types.NewCommitSigAbsent()}) + block.LastCommit = metadata.NewCommit(0, 0, metadata.BlockID{}, []metadata.CommitSig{metadata.NewCommitSigAbsent()}) block.LastCommitHash = block.LastCommit.Hash() }}, } @@ -130,9 +133,9 @@ func TestValidateBlockCommit(t *testing.T) { sm.EmptyEvidencePool{}, blockStore, ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) - wrongSigsCommit := types.NewCommit(1, 0, types.BlockID{}, nil) - badPrivVal := types.NewMockPV() + lastCommit := metadata.NewCommit(0, 0, metadata.BlockID{}, nil) + wrongSigsCommit := metadata.NewCommit(1, 0, metadata.BlockID{}, nil) + badPrivVal := consensus.NewMockPV() for height := int64(1); height < validationTestsStopHeight; height++ { proposerAddr := state.Validators.GetProposer().Address @@ -152,15 +155,15 @@ func TestValidateBlockCommit(t *testing.T) { time.Now(), ) require.NoError(t, err, "height %d", height) - wrongHeightCommit := types.NewCommit( + wrongHeightCommit := metadata.NewCommit( wrongHeightVote.Height, wrongHeightVote.Round, state.LastBlockID, - []types.CommitSig{wrongHeightVote.CommitSig()}, + []metadata.CommitSig{wrongHeightVote.CommitSig()}, ) block := sf.MakeBlock(state, height, wrongHeightCommit) err = blockExec.ValidateBlock(state, block) - _, isErrInvalidCommitHeight := err.(types.ErrInvalidCommitHeight) + _, isErrInvalidCommitHeight := err.(consensus.ErrInvalidCommitHeight) require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) /* @@ -168,7 +171,7 @@ func 
TestValidateBlockCommit(t *testing.T) { */ block = sf.MakeBlock(state, height, wrongSigsCommit) err = blockExec.ValidateBlock(state, block) - _, isErrInvalidCommitSignatures := err.(types.ErrInvalidCommitSignatures) + _, isErrInvalidCommitSignatures := err.(consensus.ErrInvalidCommitSignatures) require.True(t, isErrInvalidCommitSignatures, "expected ErrInvalidCommitSignatures at height %d, but got: %v", height, @@ -180,7 +183,7 @@ func TestValidateBlockCommit(t *testing.T) { A good block passes */ var err error - var blockID types.BlockID + var blockID metadata.BlockID state, blockID, lastCommit, err = makeAndCommitGoodBlock( state, height, @@ -210,7 +213,7 @@ func TestValidateBlockCommit(t *testing.T) { bpvPubKey, err := badPrivVal.GetPubKey(context.Background()) require.NoError(t, err) - badVote := &types.Vote{ + badVote := &consensus.Vote{ ValidatorAddress: bpvPubKey.Address(), ValidatorIndex: 0, Height: height, @@ -230,8 +233,8 @@ func TestValidateBlockCommit(t *testing.T) { goodVote.Signature, badVote.Signature = g.Signature, b.Signature - wrongSigsCommit = types.NewCommit(goodVote.Height, goodVote.Round, - blockID, []types.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}) + wrongSigsCommit = metadata.NewCommit(goodVote.Height, goodVote.Round, + blockID, []metadata.CommitSig{goodVote.CommitSig(), badVote.CommitSig()}) } } @@ -260,7 +263,7 @@ func TestValidateBlockEvidence(t *testing.T) { evpool, blockStore, ) - lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) + lastCommit := metadata.NewCommit(0, 0, metadata.BlockID{}, nil) for height := int64(1); height < validationTestsStopHeight; height++ { proposerAddr := state.Validators.GetProposer().Address @@ -269,19 +272,19 @@ func TestValidateBlockEvidence(t *testing.T) { /* A block with too much evidence fails */ - evidence := make([]types.Evidence, 0) + ev := make([]evidence.Evidence, 0) var currentBytes int64 = 0 // more bytes than the maximum allowed for evidence for currentBytes <= maxBytesEvidence { - newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), + newEv := evidence.NewMockDuplicateVoteEvidenceWithValidator(height, time.Now(), privVals[proposerAddr.String()], chainID) - evidence = append(evidence, newEv) + ev = append(ev, newEv) currentBytes += int64(len(newEv.Bytes())) } - block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, evidence, proposerAddr) + block, _ := state.MakeBlock(height, factory.MakeTenTxs(height), lastCommit, ev, proposerAddr) err := blockExec.ValidateBlock(state, block) if assert.Error(t, err) { - _, ok := err.(*types.ErrEvidenceOverflow) + _, ok := err.(*evidence.ErrEvidenceOverflow) require.True(t, ok, "expected error to be of type ErrEvidenceOverflow at height %d but got %v", height, err) } } @@ -289,17 +292,17 @@ func TestValidateBlockEvidence(t *testing.T) { /* A good block with several pieces of good evidence passes */ - evidence := make([]types.Evidence, 0) + ev := make([]evidence.Evidence, 0) var currentBytes int64 = 0 // precisely the amount of allowed evidence for { - newEv := types.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, + newEv := evidence.NewMockDuplicateVoteEvidenceWithValidator(height, defaultEvidenceTime, privVals[proposerAddr.String()], chainID) currentBytes += int64(len(newEv.Bytes())) if currentBytes >= maxBytesEvidence { break } - evidence = append(evidence, newEv) + ev = append(ev, newEv) } var err error @@ -310,7 +313,7 @@ func TestValidateBlockEvidence(t *testing.T) {
proposerAddr, blockExec, privVals, - evidence, + ev, ) require.NoError(t, err, "height %d", height) } diff --git a/state/validation_test.go b/state/validation_test.go diff --git a/store/store.go b/store/store.go index 8848b76d9..33eddf857 100644 --- a/store/store.go +++ b/store/store.go @@ -9,8 +9,9 @@ import ( "github.com/google/orderedcode" dbm "github.com/tendermint/tm-db" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) /* @@ -148,7 +149,7 @@ func (bs *BlockStore) LoadBlock(height int64) *types.Block { err := proto.Unmarshal(buf, pbb) if err != nil { // NOTE: The existence of meta should imply the existence of the // block. So, make sure meta is only saved after blocks are saved. panic(fmt.Sprintf("Error reading block: %v", err)) } @@ -184,7 +185,7 @@ func (bs *BlockStore) LoadBlockByHash(hash []byte) *types.Block { // LoadBlockPart returns the Part at the given index // from the block at the given height. // If no part is found for the given height and index, it returns nil. -func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { +func (bs *BlockStore) LoadBlockPart(height int64, index int) *metadata.Part { var pbpart = new(tmproto.Part) bz, err := bs.db.Get(blockPartKey(height, index)) @@ -199,7 +200,7 @@ func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { if err != nil { panic(fmt.Errorf("unmarshal to tmproto.Part failed: %w", err)) } - part, err := types.PartFromProto(pbpart) + part, err := metadata.PartFromProto(pbpart) if err != nil { panic(fmt.Sprintf("Error reading block part: %v", err)) } @@ -236,9 +237,9 @@ func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { // LoadBlockCommit returns the Commit for the given height. // This commit consists of the +2/3 and other Precommit-votes for block at `height`, // and it comes from the block.LastCommit for `height+1`. // If no commit is found for the given height, it returns nil. -func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { +func (bs *BlockStore) LoadBlockCommit(height int64) *metadata.Commit { var pbc = new(tmproto.Commit) bz, err := bs.db.Get(blockCommitKey(height)) if err != nil { @@ -251,7 +252,7 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { if err != nil { panic(fmt.Errorf("error reading block commit: %w", err)) } - commit, err := types.CommitFromProto(pbc) + commit, err := metadata.CommitFromProto(pbc) if err != nil { panic(fmt.Sprintf("Error reading block commit: %v", err)) } @@ -261,8 +262,8 @@ func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { // LoadSeenCommit returns the last locally seen Commit before being // cannonicalized. This is useful when we've seen a commit, but there // has not yet been a new block at `height + 1` that includes this // commit in its block.LastCommit. -func (bs *BlockStore) LoadSeenCommit() *types.Commit {
+func (bs *BlockStore) LoadSeenCommit() *metadata.Commit { var pbc = new(tmproto.Commit) bz, err := bs.db.Get(seenCommitKey()) if err != nil { @@ -276,7 +277,7 @@ func (bs *BlockStore) LoadSeenCommit() *types.Commit { panic(fmt.Sprintf("error reading block seen commit: %v", err)) } - commit, err := types.CommitFromProto(pbc) + commit, err := metadata.CommitFromProto(pbc) if err != nil { panic(fmt.Errorf("error from proto commit: %w", err)) } @@ -426,7 +427,7 @@ func (bs *BlockStore) batchDelete( // If all the nodes restart after committing a block, // we need this to reload the precommits to catch-up nodes to the // most recent height. Otherwise they'd stall at H-1. -func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { +func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *metadata.PartSet, seenCommit *metadata.Commit) { if block == nil { panic("BlockStore can only save a non-nil block") } @@ -489,7 +490,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s } } -func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, batch dbm.Batch) { +func (bs *BlockStore) saveBlockPart(height int64, index int, part *metadata.Part, batch dbm.Batch) { pbp, err := part.ToProto() if err != nil { panic(fmt.Errorf("unable to make part into proto: %w", err)) @@ -501,7 +502,7 @@ func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part, b } // SaveSeenCommit saves a seen commit, used by e.g. the state sync reactor when bootstrapping node. -func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error { +func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *metadata.Commit) error { pbc := seenCommit.ToProto() seenCommitBytes, err := proto.Marshal(pbc) if err != nil { @@ -510,7 +511,7 @@ func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) err return bs.db.Set(seenCommitKey(), seenCommitBytes) } -func (bs *BlockStore) SaveSignedHeader(sh *types.SignedHeader, blockID types.BlockID) error { +func (bs *BlockStore) SaveSignedHeader(sh *metadata.SignedHeader, blockID metadata.BlockID) error { // first check that the block store doesn't already have the block bz, err := bs.db.Get(blockMetaKey(sh.Height)) if err != nil { diff --git a/store/store_test.go b/store/store_test.go index 2132d9aff..585acecd6 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -17,9 +17,10 @@ import ( "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" tmtime "github.com/tendermint/tendermint/libs/time" + types "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/test/factory" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -28,19 +29,19 @@ import ( type cleanupFunc func() // make a Commit with a single vote containing just the height and a timestamp -func makeTestCommit(height int64, timestamp time.Time) *types.Commit { - commitSigs := []types.CommitSig{{ - BlockIDFlag: types.BlockIDFlagCommit, +func makeTestCommit(height int64, timestamp time.Time) *metadata.Commit { + commitSigs := []metadata.CommitSig{{ + BlockIDFlag: metadata.BlockIDFlagCommit, ValidatorAddress: tmrand.Bytes(crypto.AddressSize), Timestamp: timestamp, Signature: []byte("Signature"), }} - return types.NewCommit( + return metadata.NewCommit( height, 0, - 
types.BlockID{ + metadata.BlockID{ Hash: crypto.CRandBytes(32), - PartSetHeader: types.PartSetHeader{Hash: crypto.CRandBytes(32), Total: 2}, + PartSetHeader: metadata.PartSetHeader{Hash: crypto.CRandBytes(32), Total: 2}, }, commitSigs) } @@ -63,16 +64,16 @@ func freshBlockStore() (*BlockStore, dbm.DB) { var ( state sm.State block *types.Block - partSet *types.PartSet - part1 *types.Part - part2 *types.Part - seenCommit1 *types.Commit + partSet *metadata.PartSet + part1 *metadata.Part + part2 *metadata.Part + seenCommit1 *metadata.Commit ) func TestMain(m *testing.M) { var cleanup cleanupFunc state, _, cleanup = makeStateAndBlockStore(log.NewNopLogger()) - block = factory.MakeBlock(state, 1, new(types.Commit)) + block = factory.MakeBlock(state, 1, new(metadata.Commit)) partSet = block.MakePartSet(2) part1 = partSet.GetPart(0) part2 = partSet.GetPart(1) @@ -98,19 +99,19 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { } // save a block - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + block := factory.MakeBlock(state, bs.Height()+1, new(metadata.Commit)) validPartSet := block.MakePartSet(2) seenCommit := makeTestCommit(10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") - incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) - uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) + incompletePartSet := metadata.NewPartSetFromHeader(metadata.PartSetHeader{Total: 2}) + uncontiguousPartSet := metadata.NewPartSetFromHeader(metadata.PartSetHeader{Total: 0}) _, err := uncontiguousPartSet.AddPart(part2) require.Error(t, err) - header1 := types.Header{ + header1 := metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol}, Height: 1, ChainID: "block_test", @@ -122,8 +123,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { commitAtH10 := makeTestCommit(10, tmtime.Now()) tuples := []struct { block *types.Block - parts *types.PartSet - seenCommit *types.Commit + parts *metadata.PartSet + seenCommit *metadata.Commit wantPanic string wantErr bool @@ -146,7 +147,7 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { { block: newBlock( // New block at height 5 in empty block store is fine - types.Header{ + metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol}, Height: 5, ChainID: "block_test", @@ -210,10 +211,10 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { type quad struct { block *types.Block - commit *types.Commit + commit *metadata.Commit meta *types.BlockMeta - seenCommit *types.Commit + seenCommit *metadata.Commit } for i, tuple := range tuples { @@ -299,7 +300,7 @@ func TestLoadBaseMeta(t *testing.T) { bs := NewBlockStore(dbm.NewMemDB()) for h := int64(1); h <= 10; h++ { - block := factory.MakeBlock(state, h, new(types.Commit)) + block := factory.MakeBlock(state, h, new(metadata.Commit)) partSet := block.MakePartSet(2) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) @@ -343,7 +344,7 @@ func TestLoadBlockPart(t *testing.T) { gotPart, _, panicErr := doFn(loadPart) require.Nil(t, panicErr, "an existent and proper block should not panic") require.Nil(t, res, "a properly saved block should return a proper block") - require.Equal(t, gotPart.(*types.Part), part1, + require.Equal(t, gotPart.(*metadata.Part), part1, "expecting successful retrieval of previously saved 
block") } @@ -363,7 +364,7 @@ func TestPruneBlocks(t *testing.T) { // make more than 1000 blocks, to test batch deletions for h := int64(1); h <= 1500; h++ { - block := factory.MakeBlock(state, h, new(types.Commit)) + block := factory.MakeBlock(state, h, new(metadata.Commit)) partSet := block.MakePartSet(2) seenCommit := makeTestCommit(h, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) @@ -447,7 +448,7 @@ func TestLoadBlockMeta(t *testing.T) { require.Contains(t, panicErr.Error(), "unmarshal to tmproto.BlockMeta") // 3. A good blockMeta serialized and saved to the DB should be retrievable - meta := &types.BlockMeta{Header: types.Header{ + meta := &types.BlockMeta{Header: metadata.Header{ Version: version.Consensus{ Block: version.BlockProtocol, App: 0}, Height: 1, ProposerAddress: tmrand.Bytes(crypto.AddressSize)}} pbm := meta.ToProto() @@ -468,7 +469,7 @@ func TestBlockFetchAtHeight(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore(log.NewNopLogger()) defer cleanup() require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") - block := factory.MakeBlock(state, bs.Height()+1, new(types.Commit)) + block := factory.MakeBlock(state, bs.Height()+1, new(metadata.Commit)) partSet := block.MakePartSet(2) seenCommit := makeTestCommit(10, tmtime.Now()) @@ -547,7 +548,7 @@ func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr return res, err, panicErr } -func newBlock(hdr types.Header, lastCommit *types.Commit) *types.Block { +func newBlock(hdr metadata.Header, lastCommit *metadata.Commit) *types.Block { return &types.Block{ Header: hdr, LastCommit: lastCommit, diff --git a/test/e2e/app/app.go b/test/e2e/app/app.go index 26b10d32a..1adc12c55 100644 --- a/test/e2e/app/app.go +++ b/test/e2e/app/app.go @@ -9,8 +9,8 @@ import ( "strconv" "github.com/tendermint/tendermint/abci/example/code" - abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/abci" "github.com/tendermint/tendermint/version" ) diff --git a/test/e2e/app/snapshots.go b/test/e2e/app/snapshots.go index 4ddb7ecdc..83a24fa7f 100644 --- a/test/e2e/app/snapshots.go +++ b/test/e2e/app/snapshots.go @@ -11,7 +11,7 @@ import ( "path/filepath" "sync" - abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/pkg/abci" ) const ( diff --git a/test/e2e/generator/generate.go b/test/e2e/generator/generate.go index f699b1162..b7ce758e8 100644 --- a/test/e2e/generator/generate.go +++ b/test/e2e/generator/generate.go @@ -6,8 +6,8 @@ import ( "sort" "strings" + "github.com/tendermint/tendermint/pkg/consensus" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) var ( @@ -45,7 +45,7 @@ var ( evidence = uniformChoice{0, 1, 10} txSize = uniformChoice{1024, 10240} // either 1kb or 10kb ipv6 = uniformChoice{false, true} - keyType = uniformChoice{types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1} + keyType = uniformChoice{consensus.ABCIPubKeyTypeEd25519, consensus.ABCIPubKeyTypeSecp256k1} ) // Generate generates random testnets using the given RNG. 
diff --git a/test/e2e/pkg/testnet.go b/test/e2e/pkg/testnet.go index cfeb54bde..9caf4a546 100644 --- a/test/e2e/pkg/testnet.go +++ b/test/e2e/pkg/testnet.go @@ -16,8 +16,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" + "github.com/tendermint/tendermint/pkg/consensus" rpchttp "github.com/tendermint/tendermint/rpc/client/http" - "github.com/tendermint/tendermint/types" ) const ( @@ -295,7 +295,7 @@ func (t Testnet) Validate() error { return errors.New("network has no nodes") } switch t.KeyType { - case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1: + case "", consensus.ABCIPubKeyTypeEd25519, consensus.ABCIPubKeyTypeSecp256k1: default: return errors.New("unsupported KeyType") } diff --git a/test/e2e/runner/benchmark.go b/test/e2e/runner/benchmark.go index 74d2491f5..ee24d836e 100644 --- a/test/e2e/runner/benchmark.go +++ b/test/e2e/runner/benchmark.go @@ -8,8 +8,8 @@ import ( "path/filepath" "time" + "github.com/tendermint/tendermint/pkg/block" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) // Benchmark is a simple function for fetching, calculating and printing @@ -63,7 +63,7 @@ func Benchmark(testnet *e2e.Testnet, benchmarkLength int64) error { return nil } -func (t *testnetStats) populateTxns(blocks []*types.BlockMeta) { +func (t *testnetStats) populateTxns(blocks []*block.BlockMeta) { t.numtxns = 0 for _, b := range blocks { t.numtxns += int64(b.NumTxs) @@ -126,8 +126,8 @@ func (t *testnetStats) String() string { // fetchBlockChainSample waits for `benchmarkLength` amount of blocks to pass, fetching // all of the headers for these blocks from an archive node and returning it. 
-func fetchBlockChainSample(testnet *e2e.Testnet, benchmarkLength int64) ([]*types.BlockMeta, error) { - var blocks []*types.BlockMeta +func fetchBlockChainSample(testnet *e2e.Testnet, benchmarkLength int64) ([]*block.BlockMeta, error) { + var blocks []*block.BlockMeta // Find the first archive node archiveNode := testnet.ArchiveNodes()[0] @@ -174,7 +174,7 @@ func fetchBlockChainSample(testnet *e2e.Testnet, benchmarkLength int64) ([]*type return blocks, nil } -func splitIntoBlockIntervals(blocks []*types.BlockMeta) []time.Duration { +func splitIntoBlockIntervals(blocks []*block.BlockMeta) []time.Duration { intervals := make([]time.Duration, len(blocks)-1) lastTime := blocks[0].Header.Time for i, block := range blocks { diff --git a/test/e2e/runner/evidence.go b/test/e2e/runner/evidence.go index 6a246dcb5..9fa700e03 100644 --- a/test/e2e/runner/evidence.go +++ b/test/e2e/runner/evidence.go @@ -13,10 +13,13 @@ import ( "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/internal/test/factory" tmjson "github.com/tendermint/tendermint/libs/json" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/evidence" + "github.com/tendermint/tendermint/pkg/light" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/privval" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" ) @@ -52,7 +55,7 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error { return err } - valSet, err := types.ValidatorSetFromExistingValidators(valRes.Validators) + valSet, err := consensus.ValidatorSetFromExistingValidators(valRes.Validators) if err != nil { return err } @@ -70,7 +73,7 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error { return err } - var ev types.Evidence + var ev evidence.Evidence for i := 1; i <= amount; i++ { if i%lightClientEvidenceRatio == 0 { ev, err = generateLightClientAttackEvidence( @@ -103,8 +106,8 @@ func InjectEvidence(testnet *e2e.Testnet, amount int) error { return nil } -func getPrivateValidatorKeys(testnet *e2e.Testnet) ([]types.MockPV, error) { - privVals := []types.MockPV{} +func getPrivateValidatorKeys(testnet *e2e.Testnet) ([]consensus.MockPV, error) { + privVals := []consensus.MockPV{} for _, node := range testnet.Nodes { if node.Mode == e2e.ModeValidator { @@ -115,7 +118,7 @@ func getPrivateValidatorKeys(testnet *e2e.Testnet) ([]types.MockPV, error) { } // Create mock private validators from the validators private key. MockPV is // stateless which means we can double vote and do other funky stuff - privVals = append(privVals, types.NewMockPVWithParams(privKey, false, false)) + privVals = append(privVals, consensus.NewMockPVWithParams(privKey, false, false)) } } @@ -125,12 +128,12 @@ func getPrivateValidatorKeys(testnet *e2e.Testnet) ([]types.MockPV, error) { // creates evidence of a lunatic attack. The height provided is the common height. // The forged height happens 2 blocks later. 
func generateLightClientAttackEvidence( - privVals []types.MockPV, + privVals []consensus.MockPV, height int64, - vals *types.ValidatorSet, + vals *consensus.ValidatorSet, chainID string, evTime time.Time, -) (*types.LightClientAttackEvidence, error) { +) (*evidence.LightClientAttackEvidence, error) { // forge a random header forgedHeight := height + 2 forgedTime := evTime.Add(1 * time.Second) @@ -148,15 +151,15 @@ func generateLightClientAttackEvidence( // create a commit for the forged header blockID := makeBlockID(header.Hash(), 1000, []byte("partshash")) - voteSet := types.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) + voteSet := consensus.NewVoteSet(chainID, forgedHeight, 0, tmproto.SignedMsgType(2), conflictingVals) commit, err := factory.MakeCommit(blockID, forgedHeight, 0, voteSet, pv, forgedTime) if err != nil { return nil, err } - ev := &types.LightClientAttackEvidence{ - ConflictingBlock: &types.LightBlock{ - SignedHeader: &types.SignedHeader{ + ev := &evidence.LightClientAttackEvidence{ + ConflictingBlock: &light.LightBlock{ + SignedHeader: &metadata.SignedHeader{ Header: header, Commit: commit, }, @@ -166,7 +169,7 @@ func generateLightClientAttackEvidence( TotalVotingPower: vals.TotalVotingPower(), Timestamp: evTime, } - ev.ByzantineValidators = ev.GetByzantineValidators(vals, &types.SignedHeader{ + ev.ByzantineValidators = ev.GetByzantineValidators(vals, &metadata.SignedHeader{ Header: makeHeaderRandom(chainID, forgedHeight), }) return ev, nil @@ -175,12 +178,12 @@ func generateLightClientAttackEvidence( // generateDuplicateVoteEvidence picks a random validator from the val set and // returns duplicate vote evidence against the validator func generateDuplicateVoteEvidence( - privVals []types.MockPV, + privVals []consensus.MockPV, height int64, - vals *types.ValidatorSet, + vals *consensus.ValidatorSet, chainID string, time time.Time, -) (*types.DuplicateVoteEvidence, error) { +) (*evidence.DuplicateVoteEvidence, error) { // nolint:gosec // G404: Use of weak random number generator privVal := privVals[rand.Intn(len(privVals))] @@ -193,7 +196,7 @@ func generateDuplicateVoteEvidence( if err != nil { return nil, err } - ev := types.NewDuplicateVoteEvidence(voteA, voteB, time, vals) + ev := evidence.NewDuplicateVoteEvidence(voteA, voteB, time, vals) if ev == nil { return nil, fmt.Errorf("could not generate evidence a=%v b=%v vals=%v", voteA, voteB, vals) } @@ -215,8 +218,8 @@ func readPrivKey(keyFilePath string) (crypto.PrivKey, error) { return pvKey.PrivKey, nil } -func makeHeaderRandom(chainID string, height int64) *types.Header { - return &types.Header{ +func makeHeaderRandom(chainID string, height int64) *metadata.Header { + return &metadata.Header{ Version: version.Consensus{Block: version.BlockProtocol, App: 1}, ChainID: chainID, Height: height, @@ -234,42 +237,42 @@ func makeHeaderRandom(chainID string, height int64) *types.Header { } } -func makeRandomBlockID() types.BlockID { +func makeRandomBlockID() metadata.BlockID { return makeBlockID(crypto.CRandBytes(tmhash.Size), 100, crypto.CRandBytes(tmhash.Size)) } -func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) types.BlockID { +func makeBlockID(hash []byte, partSetSize uint32, partSetHash []byte) metadata.BlockID { var ( h = make([]byte, tmhash.Size) psH = make([]byte, tmhash.Size) ) copy(h, hash) copy(psH, partSetHash) - return types.BlockID{ + return metadata.BlockID{ Hash: h, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Total: 
partSetSize, Hash: psH, }, } } -func mutateValidatorSet(privVals []types.MockPV, vals *types.ValidatorSet, -) ([]types.PrivValidator, *types.ValidatorSet, error) { +func mutateValidatorSet(privVals []consensus.MockPV, vals *consensus.ValidatorSet, +) ([]consensus.PrivValidator, *consensus.ValidatorSet, error) { newVal, newPrivVal := factory.RandValidator(false, 10) - var newVals *types.ValidatorSet + var newVals *consensus.ValidatorSet if vals.Size() > 2 { - newVals = types.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) + newVals = consensus.NewValidatorSet(append(vals.Copy().Validators[:vals.Size()-1], newVal)) } else { - newVals = types.NewValidatorSet(append(vals.Copy().Validators, newVal)) + newVals = consensus.NewValidatorSet(append(vals.Copy().Validators, newVal)) } // we need to sort the priv validators with the same index as the validator set - pv := make([]types.PrivValidator, newVals.Size()) + pv := make([]consensus.PrivValidator, newVals.Size()) for idx, val := range newVals.Validators { found := false - for _, p := range append(privVals, newPrivVal.(types.MockPV)) { + for _, p := range append(privVals, newPrivVal.(consensus.MockPV)) { if bytes.Equal(p.PrivKey.PubKey().Address(), val.Address) { pv[idx] = p found = true diff --git a/test/e2e/runner/load.go b/test/e2e/runner/load.go index 518e32564..1fac6bb2c 100644 --- a/test/e2e/runner/load.go +++ b/test/e2e/runner/load.go @@ -8,9 +8,9 @@ import ( "math" "time" + "github.com/tendermint/tendermint/pkg/mempool" rpchttp "github.com/tendermint/tendermint/rpc/client/http" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) // Load generates transactions against the network until the given context is @@ -29,8 +29,8 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error { initialTimeout := 1 * time.Minute stallTimeout := 30 * time.Second - chTx := make(chan types.Tx) - chSuccess := make(chan types.Tx) + chTx := make(chan mempool.Tx) + chSuccess := make(chan mempool.Tx) ctx, cancel := context.WithCancel(ctx) defer cancel() @@ -66,7 +66,7 @@ func Load(ctx context.Context, testnet *e2e.Testnet, multiplier int) error { } // loadGenerate generates jobs until the context is canceled -func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, size int64) { +func loadGenerate(ctx context.Context, chTx chan<- mempool.Tx, multiplier int, size int64) { for i := 0; i < math.MaxInt64; i++ { // We keep generating the same 100 keys over and over, with different values. // This gives a reasonable load without putting too much data in the app. @@ -77,7 +77,7 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, siz if err != nil { panic(fmt.Sprintf("Failed to read random bytes: %v", err)) } - tx := types.Tx(fmt.Sprintf("load-%X=%x", id, bz)) + tx := mempool.Tx(fmt.Sprintf("load-%X=%x", id, bz)) select { case chTx <- tx: @@ -92,7 +92,7 @@ func loadGenerate(ctx context.Context, chTx chan<- types.Tx, multiplier int, siz } // loadProcess processes transactions -func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan types.Tx, chSuccess chan<- types.Tx) { +func loadProcess(ctx context.Context, testnet *e2e.Testnet, chTx <-chan mempool.Tx, chSuccess chan<- mempool.Tx) { // Each worker gets its own client to each node, which allows for some // concurrency while still bounding it. 
clients := map[string]*rpchttp.HTTP{} diff --git a/test/e2e/runner/rpc.go b/test/e2e/runner/rpc.go index 52c009caa..ca3a09e45 100644 --- a/test/e2e/runner/rpc.go +++ b/test/e2e/runner/rpc.go @@ -6,16 +6,17 @@ import ( "fmt" "time" + "github.com/tendermint/tendermint/pkg/block" + "github.com/tendermint/tendermint/pkg/metadata" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpctypes "github.com/tendermint/tendermint/rpc/core/types" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) // waitForHeight waits for the network to reach a certain height (or above), // returning the highest height seen. Errors if the network is not making // progress at all. -func waitForHeight(testnet *e2e.Testnet, height int64) (*types.Block, *types.BlockID, error) { +func waitForHeight(testnet *e2e.Testnet, height int64) (*block.Block, *metadata.BlockID, error) { var ( err error maxResult *rpctypes.ResultBlock diff --git a/test/e2e/runner/setup.go b/test/e2e/runner/setup.go index c968ef306..2942e96fd 100644 --- a/test/e2e/runner/setup.go +++ b/test/e2e/runner/setup.go @@ -20,9 +20,10 @@ import ( "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/privval" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) const ( @@ -105,7 +106,7 @@ func Setup(testnet *e2e.Testnet) error { return err } - err = (&types.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) + err = (&p2p.NodeKey{PrivKey: node.NodeKey}).SaveAs(filepath.Join(nodeDir, "config", "node_key.json")) if err != nil { return err } @@ -185,24 +186,24 @@ services: } // MakeGenesis generates a genesis document. 
-func MakeGenesis(testnet *e2e.Testnet) (types.GenesisDoc, error) { - genesis := types.GenesisDoc{ +func MakeGenesis(testnet *e2e.Testnet) (consensus.GenesisDoc, error) { + genesis := consensus.GenesisDoc{ GenesisTime: time.Now(), ChainID: testnet.Name, - ConsensusParams: types.DefaultConsensusParams(), + ConsensusParams: consensus.DefaultConsensusParams(), InitialHeight: testnet.InitialHeight, } switch testnet.KeyType { - case "", types.ABCIPubKeyTypeEd25519, types.ABCIPubKeyTypeSecp256k1: + case "", consensus.ABCIPubKeyTypeEd25519, consensus.ABCIPubKeyTypeSecp256k1: genesis.ConsensusParams.Validator.PubKeyTypes = - append(genesis.ConsensusParams.Validator.PubKeyTypes, types.ABCIPubKeyTypeSecp256k1) + append(genesis.ConsensusParams.Validator.PubKeyTypes, consensus.ABCIPubKeyTypeSecp256k1) default: return genesis, errors.New("unsupported KeyType") } genesis.ConsensusParams.Evidence.MaxAgeNumBlocks = e2e.EvidenceAgeHeight genesis.ConsensusParams.Evidence.MaxAgeDuration = e2e.EvidenceAgeTime for validator, power := range testnet.Validators { - genesis.Validators = append(genesis.Validators, types.GenesisValidator{ + genesis.Validators = append(genesis.Validators, consensus.GenesisValidator{ Name: validator.Name, Address: validator.PrivvalKey.PubKey().Address(), PubKey: validator.PrivvalKey.PubKey(), diff --git a/test/e2e/tests/app_test.go b/test/e2e/tests/app_test.go index 08710f168..9fc4ffe15 100644 --- a/test/e2e/tests/app_test.go +++ b/test/e2e/tests/app_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/mempool" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) // Tests that any initial state given in genesis has made it into the app. @@ -80,7 +80,7 @@ func TestApp_Tx(t *testing.T) { key := fmt.Sprintf("testapp-tx-%v", node.Name) value := fmt.Sprintf("%x", bz) - tx := types.Tx(fmt.Sprintf("%v=%v", key, value)) + tx := mempool.Tx(fmt.Sprintf("%v=%v", key, value)) _, err = client.BroadcastTxSync(ctx, tx) require.NoError(t, err) diff --git a/test/e2e/tests/e2e_test.go b/test/e2e/tests/e2e_test.go index 15c747b5b..c87cac6eb 100644 --- a/test/e2e/tests/e2e_test.go +++ b/test/e2e/tests/e2e_test.go @@ -9,10 +9,10 @@ import ( "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/block" rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpctypes "github.com/tendermint/tendermint/rpc/core/types" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) func init() { @@ -26,7 +26,7 @@ var ( ctx = context.Background() testnetCache = map[string]e2e.Testnet{} testnetCacheMtx = sync.Mutex{} - blocksCache = map[string][]*types.Block{} + blocksCache = map[string][]*block.Block{} blocksCacheMtx = sync.Mutex{} ) @@ -84,7 +84,7 @@ func loadTestnet(t *testing.T) e2e.Testnet { // fetchBlockChain fetches a complete, up-to-date block history from // the freshest testnet archive node. 
-func fetchBlockChain(t *testing.T) []*types.Block { +func fetchBlockChain(t *testing.T) []*block.Block { t.Helper() testnet := loadTestnet(t) @@ -115,7 +115,7 @@ func fetchBlockChain(t *testing.T) []*types.Block { to := status.SyncInfo.LatestBlockHeight blocks, ok := blocksCache[testnet.Name] if !ok { - blocks = make([]*types.Block, 0, to-from+1) + blocks = make([]*block.Block, 0, to-from+1) } if len(blocks) > 0 { from = blocks[len(blocks)-1].Height + 1 diff --git a/test/e2e/tests/validator_test.go b/test/e2e/tests/validator_test.go index 847a8d388..f181a5784 100644 --- a/test/e2e/tests/validator_test.go +++ b/test/e2e/tests/validator_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/pkg/consensus" e2e "github.com/tendermint/tendermint/test/e2e/pkg" - "github.com/tendermint/tendermint/types" ) // Tests that validator sets are available and correct according to @@ -35,7 +35,7 @@ func TestValidator_Sets(t *testing.T) { valSchedule.Increment(first - node.Testnet.InitialHeight) for h := first; h <= last; h++ { - validators := []*types.Validator{} + validators := []*consensus.Validator{} perPage := 100 for page := 1; ; page++ { resp, err := client.Validators(ctx, &(h), &(page), &perPage) @@ -126,7 +126,7 @@ func TestValidator_Sign(t *testing.T) { // validatorSchedule is a validator set iterator, which takes into account // validator set updates. type validatorSchedule struct { - Set *types.ValidatorSet + Set *consensus.ValidatorSet height int64 updates map[int64]map[*e2e.Node]int64 } @@ -138,7 +138,7 @@ func newValidatorSchedule(testnet e2e.Testnet) *validatorSchedule { } return &validatorSchedule{ height: testnet.InitialHeight, - Set: types.NewValidatorSet(makeVals(valMap)), + Set: consensus.NewValidatorSet(makeVals(valMap)), updates: testnet.ValidatorUpdates, } } @@ -159,10 +159,10 @@ func (s *validatorSchedule) Increment(heights int64) { } } -func makeVals(valMap map[*e2e.Node]int64) []*types.Validator { - vals := make([]*types.Validator, 0, len(valMap)) +func makeVals(valMap map[*e2e.Node]int64) []*consensus.Validator { + vals := make([]*consensus.Validator, 0, len(valMap)) for node, power := range valMap { - vals = append(vals, types.NewValidator(node.PrivvalKey.PubKey(), power)) + vals = append(vals, consensus.NewValidator(node.PrivvalKey.PubKey(), power)) } return vals } diff --git a/test/fuzz/p2p/addrbook/init-corpus/main.go b/test/fuzz/p2p/addrbook/init-corpus/main.go index 1166f9bd7..c075342a1 100644 --- a/test/fuzz/p2p/addrbook/init-corpus/main.go +++ b/test/fuzz/p2p/addrbook/init-corpus/main.go @@ -13,7 +13,7 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" - "github.com/tendermint/tendermint/types" + types "github.com/tendermint/tendermint/pkg/p2p" ) func main() { diff --git a/test/fuzz/p2p/pex/init-corpus/main.go b/test/fuzz/p2p/pex/init-corpus/main.go index e90216864..a7aea90e2 100644 --- a/test/fuzz/p2p/pex/init-corpus/main.go +++ b/test/fuzz/p2p/pex/init-corpus/main.go @@ -13,8 +13,8 @@ import ( "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/internal/p2p" "github.com/tendermint/tendermint/internal/p2p/pex" + types "github.com/tendermint/tendermint/pkg/p2p" tmp2p "github.com/tendermint/tendermint/proto/tendermint/p2p" - "github.com/tendermint/tendermint/types" ) func main() { diff --git a/test/fuzz/p2p/pex/reactor_receive.go b/test/fuzz/p2p/pex/reactor_receive.go index 388361a4e..138551c22 100644 --- 
a/test/fuzz/p2p/pex/reactor_receive.go +++ b/test/fuzz/p2p/pex/reactor_receive.go @@ -9,7 +9,7 @@ import ( "github.com/tendermint/tendermint/internal/p2p/pex" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/libs/service" - "github.com/tendermint/tendermint/types" + types "github.com/tendermint/tendermint/pkg/p2p" "github.com/tendermint/tendermint/version" ) diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index 4d333949a..6f98b3723 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -10,6 +10,8 @@ import ( "time" "github.com/tendermint/tendermint/crypto/tmhash" + "github.com/tendermint/tendermint/pkg/consensus" + "github.com/tendermint/tendermint/pkg/metadata" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/privval" @@ -19,7 +21,6 @@ import ( tmnet "github.com/tendermint/tendermint/libs/net" tmos "github.com/tendermint/tendermint/libs/os" tmproto "github.com/tendermint/tendermint/proto/tendermint/types" - "github.com/tendermint/tendermint/types" ) // Test harness error codes (which act as exit codes when the test harness fails). @@ -219,14 +220,14 @@ func (th *TestHarness) TestSignProposal() error { th.logger.Info("TEST: Signing of proposals") // sha256 hash of "hash" hash := tmhash.Sum([]byte("hash")) - prop := &types.Proposal{ + prop := &consensus.Proposal{ Type: tmproto.ProposalType, Height: 100, Round: 0, POLRound: -1, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: hash, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Hash: hash, Total: 1000000, }, @@ -234,7 +235,7 @@ func (th *TestHarness) TestSignProposal() error { Timestamp: time.Now(), } p := prop.ToProto() - propBytes := types.ProposalSignBytes(th.chainID, p) + propBytes := consensus.ProposalSignBytes(th.chainID, p) if err := th.signerClient.SignProposal(context.Background(), th.chainID, p); err != nil { th.logger.Error("FAILED: Signing of proposal", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") @@ -267,13 +268,13 @@ func (th *TestHarness) TestSignVote() error { for _, voteType := range voteTypes { th.logger.Info("Testing vote type", "type", voteType) hash := tmhash.Sum([]byte("hash")) - vote := &types.Vote{ + vote := &consensus.Vote{ Type: voteType, Height: 101, Round: 0, - BlockID: types.BlockID{ + BlockID: metadata.BlockID{ Hash: hash, - PartSetHeader: types.PartSetHeader{ + PartSetHeader: metadata.PartSetHeader{ Hash: hash, Total: 1000000, }, @@ -283,7 +284,7 @@ func (th *TestHarness) TestSignVote() error { Timestamp: time.Now(), } v := vote.ToProto() - voteBytes := types.VoteSignBytes(th.chainID, v) + voteBytes := consensus.VoteSignBytes(th.chainID, v) // sign the vote if err := th.signerClient.SignVote(context.Background(), th.chainID, v); err != nil { th.logger.Error("FAILED: Signing of vote", "err", err) diff --git a/tools/tm-signer-harness/internal/test_harness_test.go b/tools/tm-signer-harness/internal/test_harness_test.go index cf22bc836..14508c221 100644 --- a/tools/tm-signer-harness/internal/test_harness_test.go +++ b/tools/tm-signer-harness/internal/test_harness_test.go @@ -13,8 +13,8 @@ import ( "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/pkg/consensus" "github.com/tendermint/tendermint/privval" - 
"github.com/tendermint/tendermint/types" ) const ( @@ -130,7 +130,7 @@ func newMockSignerServer( breakProposalSigning bool, breakVoteSigning bool, ) *privval.SignerServer { - mockPV := types.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning) + mockPV := consensus.NewMockPVWithParams(privKey, breakProposalSigning, breakVoteSigning) dialerEndpoint := privval.NewSignerDialerEndpoint( th.logger, diff --git a/types/results.go b/types/results.go deleted file mode 100644 index 9181450bc..000000000 --- a/types/results.go +++ /dev/null @@ -1,54 +0,0 @@ -package types - -import ( - abci "github.com/tendermint/tendermint/abci/types" - "github.com/tendermint/tendermint/crypto/merkle" -) - -// ABCIResults wraps the deliver tx results to return a proof. -type ABCIResults []*abci.ResponseDeliverTx - -// NewResults strips non-deterministic fields from ResponseDeliverTx responses -// and returns ABCIResults. -func NewResults(responses []*abci.ResponseDeliverTx) ABCIResults { - res := make(ABCIResults, len(responses)) - for i, d := range responses { - res[i] = deterministicResponseDeliverTx(d) - } - return res -} - -// Hash returns a merkle hash of all results. -func (a ABCIResults) Hash() []byte { - return merkle.HashFromByteSlices(a.toByteSlices()) -} - -// ProveResult returns a merkle proof of one result from the set -func (a ABCIResults) ProveResult(i int) merkle.Proof { - _, proofs := merkle.ProofsFromByteSlices(a.toByteSlices()) - return *proofs[i] -} - -func (a ABCIResults) toByteSlices() [][]byte { - l := len(a) - bzs := make([][]byte, l) - for i := 0; i < l; i++ { - bz, err := a[i].Marshal() - if err != nil { - panic(err) - } - bzs[i] = bz - } - return bzs -} - -// deterministicResponseDeliverTx strips non-deterministic fields from -// ResponseDeliverTx and returns another ResponseDeliverTx. -func deterministicResponseDeliverTx(response *abci.ResponseDeliverTx) *abci.ResponseDeliverTx { - return &abci.ResponseDeliverTx{ - Code: response.Code, - Data: response.Data, - GasWanted: response.GasWanted, - GasUsed: response.GasUsed, - } -} diff --git a/types/results_test.go b/types/results_test.go deleted file mode 100644 index 5b1be3466..000000000 --- a/types/results_test.go +++ /dev/null @@ -1,54 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/tendermint/abci/types" -) - -func TestABCIResults(t *testing.T) { - a := &abci.ResponseDeliverTx{Code: 0, Data: nil} - b := &abci.ResponseDeliverTx{Code: 0, Data: []byte{}} - c := &abci.ResponseDeliverTx{Code: 0, Data: []byte("one")} - d := &abci.ResponseDeliverTx{Code: 14, Data: nil} - e := &abci.ResponseDeliverTx{Code: 14, Data: []byte("foo")} - f := &abci.ResponseDeliverTx{Code: 14, Data: []byte("bar")} - - // Nil and []byte{} should produce the same bytes - bzA, err := a.Marshal() - require.NoError(t, err) - bzB, err := b.Marshal() - require.NoError(t, err) - - require.Equal(t, bzA, bzB) - - // a and b should be the same, don't go in results. - results := ABCIResults{a, c, d, e, f} - - // Make sure each result serializes differently - last := []byte{} - assert.Equal(t, last, bzA) // first one is empty - for i, res := range results[1:] { - bz, err := res.Marshal() - require.NoError(t, err) - - assert.NotEqual(t, last, bz, "%d", i) - last = bz - } - - // Make sure that we can get a root hash from results and verify proofs. 
- root := results.Hash() - assert.NotEmpty(t, root) - - for i, res := range results { - bz, err := res.Marshal() - require.NoError(t, err) - - proof := results.ProveResult(i) - valid := proof.Verify(root, bz) - assert.NoError(t, valid, "%d", i) - } -}
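The deletion of types/results.go and its test removes the ABCIResults helper without introducing a replacement in this patch. For reference, the behaviour it provided, stripping the non-deterministic fields from each ResponseDeliverTx, hashing the marshaled results into a merkle root, and proving a single result against that root, relies only on crypto/merkle, so a caller that still needs it can reproduce it along the following lines. This is a sketch only; it assumes ResponseDeliverTx is reachable via the pkg/abci import used elsewhere in this patch.

package example

import (
	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/pkg/abci" // assumed location of ResponseDeliverTx on this branch
)

// deterministicResults keeps only the fields the deleted helper kept
// (Code, Data, GasWanted, GasUsed) and marshals each response.
func deterministicResults(responses []*abci.ResponseDeliverTx) [][]byte {
	bzs := make([][]byte, len(responses))
	for i, r := range responses {
		det := &abci.ResponseDeliverTx{
			Code:      r.Code,
			Data:      r.Data,
			GasWanted: r.GasWanted,
			GasUsed:   r.GasUsed,
		}
		bz, err := det.Marshal()
		if err != nil {
			panic(err)
		}
		bzs[i] = bz
	}
	return bzs
}

// resultsRootAndProof mirrors ABCIResults.Hash and ABCIResults.ProveResult:
// a merkle root over all deterministic results plus a proof for result i.
func resultsRootAndProof(responses []*abci.ResponseDeliverTx, i int) ([]byte, *merkle.Proof) {
	bzs := deterministicResults(responses)
	root := merkle.HashFromByteSlices(bzs)
	_, proofs := merkle.ProofsFromByteSlices(bzs)
	return root, proofs[i]
}

Verification is unchanged from the deleted test: proofs[i].Verify(root, bzs[i]) must return nil for every result.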