diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index d861a9168..6ed6f2055 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -14,6 +14,8 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi - Apps + - [abci] [\#4704](https://github.com/tendermint/tendermint/pull/4704) Add ABCI methods `ListSnapshots`, `LoadSnapshotChunk`, `OfferSnapshot`, and `ApplySnapshotChunk` for state sync snapshots. `ABCIVersion` bumped to 0.17.0. + - P2P Protocol - Go API @@ -25,6 +27,7 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### FEATURES: +- [statesync] Add state sync support, where a new node can be rapidly bootstrapped by fetching state snapshots from peers instead of replaying blocks. See the `[statesync]` config section. - [evidence] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Handle evidence from light clients (@melekes) - [lite2] [\#4532](https://github.com/tendermint/tendermint/pull/4532) Submit conflicting headers, if any, to a full node & all witnesses (@melekes) @@ -41,4 +44,6 @@ Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermi ### BUG FIXES: +- [blockchain/v2] [\#4761](https://github.com/tendermint/tendermint/pull/4761) Fix excessive CPU usage caused by spinning on closed channels (@erikgrinaker) +- [blockchain/v2] Respect `fast_sync` option (@erikgrinaker) - [light] [\#4741](https://github.com/tendermint/tendermint/pull/4741) Correctly return `ErrSignedHeaderNotFound` and `ErrValidatorSetNotFound` on corresponding RPC errors (@erikgrinaker) diff --git a/abci/client/client.go b/abci/client/client.go index 4f7c7b69a..b65ce1993 100644 --- a/abci/client/client.go +++ b/abci/client/client.go @@ -35,6 +35,10 @@ type Client interface { InitChainAsync(types.RequestInitChain) *ReqRes BeginBlockAsync(types.RequestBeginBlock) *ReqRes EndBlockAsync(types.RequestEndBlock) *ReqRes + ListSnapshotsAsync(types.RequestListSnapshots) *ReqRes + OfferSnapshotAsync(types.RequestOfferSnapshot) *ReqRes + LoadSnapshotChunkAsync(types.RequestLoadSnapshotChunk) *ReqRes + ApplySnapshotChunkAsync(types.RequestApplySnapshotChunk) *ReqRes FlushSync() error EchoSync(msg string) (*types.ResponseEcho, error) @@ -47,6 +51,10 @@ type Client interface { InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) + ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) } //---------------------------------------- diff --git a/abci/client/grpc_client.go b/abci/client/grpc_client.go index 01583bc1f..9d8e9ca3a 100644 --- a/abci/client/grpc_client.go +++ b/abci/client/grpc_client.go @@ -223,6 +223,42 @@ func (cli *grpcClient) EndBlockAsync(params types.RequestEndBlock) *ReqRes { return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_EndBlock{EndBlock: res}}) } +func (cli *grpcClient) ListSnapshotsAsync(params types.RequestListSnapshots) *ReqRes { + req := types.ToRequestListSnapshots(params) + res, err := cli.client.ListSnapshots(context.Background(), req.GetListSnapshots(), grpc.WaitForReady(true)) + if err != nil { + 
cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ListSnapshots{ListSnapshots: res}}) +} + +func (cli *grpcClient) OfferSnapshotAsync(params types.RequestOfferSnapshot) *ReqRes { + req := types.ToRequestOfferSnapshot(params) + res, err := cli.client.OfferSnapshot(context.Background(), req.GetOfferSnapshot(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_OfferSnapshot{OfferSnapshot: res}}) +} + +func (cli *grpcClient) LoadSnapshotChunkAsync(params types.RequestLoadSnapshotChunk) *ReqRes { + req := types.ToRequestLoadSnapshotChunk(params) + res, err := cli.client.LoadSnapshotChunk(context.Background(), req.GetLoadSnapshotChunk(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_LoadSnapshotChunk{LoadSnapshotChunk: res}}) +} + +func (cli *grpcClient) ApplySnapshotChunkAsync(params types.RequestApplySnapshotChunk) *ReqRes { + req := types.ToRequestApplySnapshotChunk(params) + res, err := cli.client.ApplySnapshotChunk(context.Background(), req.GetApplySnapshotChunk(), grpc.WaitForReady(true)) + if err != nil { + cli.StopForError(err) + } + return cli.finishAsyncCall(req, &types.Response{Value: &types.Response_ApplySnapshotChunk{ApplySnapshotChunk: res}}) +} + func (cli *grpcClient) finishAsyncCall(req *types.Request, res *types.Response) *ReqRes { reqres := NewReqRes(req) reqres.Response = res // Set response @@ -304,3 +340,25 @@ func (cli *grpcClient) EndBlockSync(params types.RequestEndBlock) (*types.Respon reqres := cli.EndBlockAsync(params) return reqres.Response.GetEndBlock(), cli.Error() } + +func (cli *grpcClient) ListSnapshotsSync(params types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + reqres := cli.ListSnapshotsAsync(params) + return reqres.Response.GetListSnapshots(), cli.Error() +} + +func (cli *grpcClient) OfferSnapshotSync(params types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + reqres := cli.OfferSnapshotAsync(params) + return reqres.Response.GetOfferSnapshot(), cli.Error() +} + +func (cli *grpcClient) LoadSnapshotChunkSync( + params types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + reqres := cli.LoadSnapshotChunkAsync(params) + return reqres.Response.GetLoadSnapshotChunk(), cli.Error() +} + +func (cli *grpcClient) ApplySnapshotChunkSync( + params types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + reqres := cli.ApplySnapshotChunkAsync(params) + return reqres.Response.GetApplySnapshotChunk(), cli.Error() +} diff --git a/abci/client/local_client.go b/abci/client/local_client.go index 3946bfbc5..ccd407d07 100644 --- a/abci/client/local_client.go +++ b/abci/client/local_client.go @@ -158,6 +158,50 @@ func (app *localClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { ) } +func (app *localClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ListSnapshots(req) + return app.callback( + types.ToRequestListSnapshots(req), + types.ToResponseListSnapshots(res), + ) +} + +func (app *localClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.OfferSnapshot(req) + return app.callback( + types.ToRequestOfferSnapshot(req), + types.ToResponseOfferSnapshot(res), + ) +} + +func (app 
*localClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.LoadSnapshotChunk(req) + return app.callback( + types.ToRequestLoadSnapshotChunk(req), + types.ToResponseLoadSnapshotChunk(res), + ) +} + +func (app *localClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ApplySnapshotChunk(req) + return app.callback( + types.ToRequestApplySnapshotChunk(req), + types.ToResponseApplySnapshotChunk(res), + ) +} + //------------------------------------------------------- func (app *localClient) FlushSync() error { @@ -240,6 +284,40 @@ func (app *localClient) EndBlockSync(req types.RequestEndBlock) (*types.Response return &res, nil } +func (app *localClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ListSnapshots(req) + return &res, nil +} + +func (app *localClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.OfferSnapshot(req) + return &res, nil +} + +func (app *localClient) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.LoadSnapshotChunk(req) + return &res, nil +} + +func (app *localClient) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + app.mtx.Lock() + defer app.mtx.Unlock() + + res := app.Application.ApplySnapshotChunk(req) + return &res, nil +} + //------------------------------------------------------- func (app *localClient) callback(req *types.Request, res *types.Response) *ReqRes { diff --git a/abci/client/socket_client.go b/abci/client/socket_client.go index 7898a8f26..6b183a189 100644 --- a/abci/client/socket_client.go +++ b/abci/client/socket_client.go @@ -266,6 +266,22 @@ func (cli *socketClient) EndBlockAsync(req types.RequestEndBlock) *ReqRes { return cli.queueRequest(types.ToRequestEndBlock(req)) } +func (cli *socketClient) ListSnapshotsAsync(req types.RequestListSnapshots) *ReqRes { + return cli.queueRequest(types.ToRequestListSnapshots(req)) +} + +func (cli *socketClient) OfferSnapshotAsync(req types.RequestOfferSnapshot) *ReqRes { + return cli.queueRequest(types.ToRequestOfferSnapshot(req)) +} + +func (cli *socketClient) LoadSnapshotChunkAsync(req types.RequestLoadSnapshotChunk) *ReqRes { + return cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) +} + +func (cli *socketClient) ApplySnapshotChunkAsync(req types.RequestApplySnapshotChunk) *ReqRes { + return cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) +} + //---------------------------------------- func (cli *socketClient) FlushSync() error { @@ -337,6 +353,32 @@ func (cli *socketClient) EndBlockSync(req types.RequestEndBlock) (*types.Respons return reqres.Response.GetEndBlock(), cli.Error() } +func (cli *socketClient) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + reqres := cli.queueRequest(types.ToRequestListSnapshots(req)) + cli.FlushSync() + return reqres.Response.GetListSnapshots(), cli.Error() +} + +func (cli *socketClient) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + reqres := 
cli.queueRequest(types.ToRequestOfferSnapshot(req)) + cli.FlushSync() + return reqres.Response.GetOfferSnapshot(), cli.Error() +} + +func (cli *socketClient) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + reqres := cli.queueRequest(types.ToRequestLoadSnapshotChunk(req)) + cli.FlushSync() + return reqres.Response.GetLoadSnapshotChunk(), cli.Error() +} + +func (cli *socketClient) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + reqres := cli.queueRequest(types.ToRequestApplySnapshotChunk(req)) + cli.FlushSync() + return reqres.Response.GetApplySnapshotChunk(), cli.Error() +} + //---------------------------------------- func (cli *socketClient) queueRequest(req *types.Request) *ReqRes { diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index fffc617be..90eb337c3 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -144,6 +144,26 @@ func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) typ return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates} } +func (app *PersistentKVStoreApplication) ListSnapshots( + req types.RequestListSnapshots) types.ResponseListSnapshots { + return types.ResponseListSnapshots{} +} + +func (app *PersistentKVStoreApplication) LoadSnapshotChunk( + req types.RequestLoadSnapshotChunk) types.ResponseLoadSnapshotChunk { + return types.ResponseLoadSnapshotChunk{} +} + +func (app *PersistentKVStoreApplication) OfferSnapshot( + req types.RequestOfferSnapshot) types.ResponseOfferSnapshot { + return types.ResponseOfferSnapshot{Result: types.ResponseOfferSnapshot_abort} +} + +func (app *PersistentKVStoreApplication) ApplySnapshotChunk( + req types.RequestApplySnapshotChunk) types.ResponseApplySnapshotChunk { + return types.ResponseApplySnapshotChunk{Result: types.ResponseApplySnapshotChunk_abort} +} + //--------------------------------------------- // update validators diff --git a/abci/server/socket_server.go b/abci/server/socket_server.go index e68d79599..20090b768 100644 --- a/abci/server/socket_server.go +++ b/abci/server/socket_server.go @@ -224,6 +224,18 @@ func (s *SocketServer) handleRequest(req *types.Request, responses chan<- *types case *types.Request_EndBlock: res := s.app.EndBlock(*r.EndBlock) responses <- types.ToResponseEndBlock(res) + case *types.Request_ListSnapshots: + res := s.app.ListSnapshots(*r.ListSnapshots) + responses <- types.ToResponseListSnapshots(res) + case *types.Request_OfferSnapshot: + res := s.app.OfferSnapshot(*r.OfferSnapshot) + responses <- types.ToResponseOfferSnapshot(res) + case *types.Request_LoadSnapshotChunk: + res := s.app.LoadSnapshotChunk(*r.LoadSnapshotChunk) + responses <- types.ToResponseLoadSnapshotChunk(res) + case *types.Request_ApplySnapshotChunk: + res := s.app.ApplySnapshotChunk(*r.ApplySnapshotChunk) + responses <- types.ToResponseApplySnapshotChunk(res) default: responses <- types.ToResponseException("Unknown request") } diff --git a/abci/types/application.go b/abci/types/application.go index 9dd77c4ef..51edffbd4 100644 --- a/abci/types/application.go +++ b/abci/types/application.go @@ -23,6 +23,12 @@ type Application interface { DeliverTx(RequestDeliverTx) ResponseDeliverTx // Deliver a tx for full processing EndBlock(RequestEndBlock) ResponseEndBlock // Signals the end of a block, returns changes to the validator set Commit() ResponseCommit // Commit the state and 
return the application Merkle root hash + + // State Sync Connection + ListSnapshots(RequestListSnapshots) ResponseListSnapshots // List available snapshots + OfferSnapshot(RequestOfferSnapshot) ResponseOfferSnapshot // Offer a snapshot to the application + LoadSnapshotChunk(RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk // Load a snapshot chunk + ApplySnapshotChunk(RequestApplySnapshotChunk) ResponseApplySnapshotChunk // Apply a snapshot chunk } //------------------------------------------------------- @@ -73,6 +79,22 @@ func (BaseApplication) EndBlock(req RequestEndBlock) ResponseEndBlock { return ResponseEndBlock{} } +func (BaseApplication) ListSnapshots(req RequestListSnapshots) ResponseListSnapshots { + return ResponseListSnapshots{} +} + +func (BaseApplication) OfferSnapshot(req RequestOfferSnapshot) ResponseOfferSnapshot { + return ResponseOfferSnapshot{} +} + +func (BaseApplication) LoadSnapshotChunk(req RequestLoadSnapshotChunk) ResponseLoadSnapshotChunk { + return ResponseLoadSnapshotChunk{} +} + +func (BaseApplication) ApplySnapshotChunk(req RequestApplySnapshotChunk) ResponseApplySnapshotChunk { + return ResponseApplySnapshotChunk{} +} + //------------------------------------------------------- // GRPCApplication is a GRPC wrapper for Application @@ -136,3 +158,27 @@ func (app *GRPCApplication) EndBlock(ctx context.Context, req *RequestEndBlock) res := app.app.EndBlock(*req) return &res, nil } + +func (app *GRPCApplication) ListSnapshots( + ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + res := app.app.ListSnapshots(*req) + return &res, nil +} + +func (app *GRPCApplication) OfferSnapshot( + ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + res := app.app.OfferSnapshot(*req) + return &res, nil +} + +func (app *GRPCApplication) LoadSnapshotChunk( + ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + res := app.app.LoadSnapshotChunk(*req) + return &res, nil +} + +func (app *GRPCApplication) ApplySnapshotChunk( + ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + res := app.app.ApplySnapshotChunk(*req) + return &res, nil +} diff --git a/abci/types/messages.go b/abci/types/messages.go index ad18727a8..531f75fed 100644 --- a/abci/types/messages.go +++ b/abci/types/messages.go @@ -135,6 +135,30 @@ func ToRequestEndBlock(req RequestEndBlock) *Request { } } +func ToRequestListSnapshots(req RequestListSnapshots) *Request { + return &Request{ + Value: &Request_ListSnapshots{&req}, + } +} + +func ToRequestOfferSnapshot(req RequestOfferSnapshot) *Request { + return &Request{ + Value: &Request_OfferSnapshot{&req}, + } +} + +func ToRequestLoadSnapshotChunk(req RequestLoadSnapshotChunk) *Request { + return &Request{ + Value: &Request_LoadSnapshotChunk{&req}, + } +} + +func ToRequestApplySnapshotChunk(req RequestApplySnapshotChunk) *Request { + return &Request{ + Value: &Request_ApplySnapshotChunk{&req}, + } +} + //---------------------------------------- func ToResponseException(errStr string) *Response { @@ -208,3 +232,27 @@ func ToResponseEndBlock(res ResponseEndBlock) *Response { Value: &Response_EndBlock{&res}, } } + +func ToResponseListSnapshots(res ResponseListSnapshots) *Response { + return &Response{ + Value: &Response_ListSnapshots{&res}, + } +} + +func ToResponseOfferSnapshot(res ResponseOfferSnapshot) *Response { + return &Response{ + Value: &Response_OfferSnapshot{&res}, + } +} + +func ToResponseLoadSnapshotChunk(res 
ResponseLoadSnapshotChunk) *Response { + return &Response{ + Value: &Response_LoadSnapshotChunk{&res}, + } +} + +func ToResponseApplySnapshotChunk(res ResponseApplySnapshotChunk) *Response { + return &Response{ + Value: &Response_ApplySnapshotChunk{&res}, + } +} diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 09bf63cb7..84263943b 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -61,6 +61,74 @@ func (CheckTxType) EnumDescriptor() ([]byte, []int) { return fileDescriptor_9f1eaa49c51fa1ac, []int{0} } +type ResponseOfferSnapshot_Result int32 + +const ( + ResponseOfferSnapshot_accept ResponseOfferSnapshot_Result = 0 + ResponseOfferSnapshot_abort ResponseOfferSnapshot_Result = 1 + ResponseOfferSnapshot_reject ResponseOfferSnapshot_Result = 2 + ResponseOfferSnapshot_reject_format ResponseOfferSnapshot_Result = 3 + ResponseOfferSnapshot_reject_sender ResponseOfferSnapshot_Result = 4 +) + +var ResponseOfferSnapshot_Result_name = map[int32]string{ + 0: "accept", + 1: "abort", + 2: "reject", + 3: "reject_format", + 4: "reject_sender", +} + +var ResponseOfferSnapshot_Result_value = map[string]int32{ + "accept": 0, + "abort": 1, + "reject": 2, + "reject_format": 3, + "reject_sender": 4, +} + +func (x ResponseOfferSnapshot_Result) String() string { + return proto.EnumName(ResponseOfferSnapshot_Result_name, int32(x)) +} + +func (ResponseOfferSnapshot_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{30, 0} +} + +type ResponseApplySnapshotChunk_Result int32 + +const ( + ResponseApplySnapshotChunk_accept ResponseApplySnapshotChunk_Result = 0 + ResponseApplySnapshotChunk_abort ResponseApplySnapshotChunk_Result = 1 + ResponseApplySnapshotChunk_retry ResponseApplySnapshotChunk_Result = 2 + ResponseApplySnapshotChunk_retry_snapshot ResponseApplySnapshotChunk_Result = 3 + ResponseApplySnapshotChunk_reject_snapshot ResponseApplySnapshotChunk_Result = 4 +) + +var ResponseApplySnapshotChunk_Result_name = map[int32]string{ + 0: "accept", + 1: "abort", + 2: "retry", + 3: "retry_snapshot", + 4: "reject_snapshot", +} + +var ResponseApplySnapshotChunk_Result_value = map[string]int32{ + "accept": 0, + "abort": 1, + "retry": 2, + "retry_snapshot": 3, + "reject_snapshot": 4, +} + +func (x ResponseApplySnapshotChunk_Result) String() string { + return proto.EnumName(ResponseApplySnapshotChunk_Result_name, int32(x)) +} + +func (ResponseApplySnapshotChunk_Result) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{32, 0} +} + type Request struct { // Types that are valid to be assigned to Value: // *Request_Echo @@ -74,6 +142,10 @@ type Request struct { // *Request_DeliverTx // *Request_EndBlock // *Request_Commit + // *Request_ListSnapshots + // *Request_OfferSnapshot + // *Request_LoadSnapshotChunk + // *Request_ApplySnapshotChunk Value isRequest_Value `protobuf_oneof:"value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -153,18 +225,34 @@ type Request_EndBlock struct { type Request_Commit struct { Commit *RequestCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } +type Request_ListSnapshots struct { + ListSnapshots *RequestListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Request_OfferSnapshot struct { + OfferSnapshot *RequestOfferSnapshot `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type 
Request_LoadSnapshotChunk struct { + LoadSnapshotChunk *RequestLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Request_ApplySnapshotChunk struct { + ApplySnapshotChunk *RequestApplySnapshotChunk `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} -func (*Request_Echo) isRequest_Value() {} -func (*Request_Flush) isRequest_Value() {} -func (*Request_Info) isRequest_Value() {} -func (*Request_SetOption) isRequest_Value() {} -func (*Request_InitChain) isRequest_Value() {} -func (*Request_Query) isRequest_Value() {} -func (*Request_BeginBlock) isRequest_Value() {} -func (*Request_CheckTx) isRequest_Value() {} -func (*Request_DeliverTx) isRequest_Value() {} -func (*Request_EndBlock) isRequest_Value() {} -func (*Request_Commit) isRequest_Value() {} +func (*Request_Echo) isRequest_Value() {} +func (*Request_Flush) isRequest_Value() {} +func (*Request_Info) isRequest_Value() {} +func (*Request_SetOption) isRequest_Value() {} +func (*Request_InitChain) isRequest_Value() {} +func (*Request_Query) isRequest_Value() {} +func (*Request_BeginBlock) isRequest_Value() {} +func (*Request_CheckTx) isRequest_Value() {} +func (*Request_DeliverTx) isRequest_Value() {} +func (*Request_EndBlock) isRequest_Value() {} +func (*Request_Commit) isRequest_Value() {} +func (*Request_ListSnapshots) isRequest_Value() {} +func (*Request_OfferSnapshot) isRequest_Value() {} +func (*Request_LoadSnapshotChunk) isRequest_Value() {} +func (*Request_ApplySnapshotChunk) isRequest_Value() {} func (m *Request) GetValue() isRequest_Value { if m != nil { @@ -250,6 +338,34 @@ func (m *Request) GetCommit() *RequestCommit { return nil } +func (m *Request) GetListSnapshots() *RequestListSnapshots { + if x, ok := m.GetValue().(*Request_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Request) GetOfferSnapshot() *RequestOfferSnapshot { + if x, ok := m.GetValue().(*Request_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Request) GetLoadSnapshotChunk() *RequestLoadSnapshotChunk { + if x, ok := m.GetValue().(*Request_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Request) GetApplySnapshotChunk() *RequestApplySnapshotChunk { + if x, ok := m.GetValue().(*Request_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*Request) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -264,6 +380,10 @@ func (*Request) XXX_OneofWrappers() []interface{} { (*Request_DeliverTx)(nil), (*Request_EndBlock)(nil), (*Request_Commit)(nil), + (*Request_ListSnapshots)(nil), + (*Request_OfferSnapshot)(nil), + (*Request_LoadSnapshotChunk)(nil), + (*Request_ApplySnapshotChunk)(nil), } } @@ -881,6 +1001,230 @@ func (m *RequestCommit) XXX_DiscardUnknown() { var xxx_messageInfo_RequestCommit proto.InternalMessageInfo +// lists available snapshots +type RequestListSnapshots struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestListSnapshots) Reset() { *m = RequestListSnapshots{} } +func (m *RequestListSnapshots) String() string { return proto.CompactTextString(m) } +func (*RequestListSnapshots) ProtoMessage() {} +func (*RequestListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{12} +} +func (m *RequestListSnapshots) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestListSnapshots.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestListSnapshots.Merge(m, src) +} +func (m *RequestListSnapshots) XXX_Size() int { + return m.Size() +} +func (m *RequestListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_RequestListSnapshots.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestListSnapshots proto.InternalMessageInfo + +// offers a snapshot to the application +type RequestOfferSnapshot struct { + Snapshot *Snapshot `protobuf:"bytes,1,opt,name=snapshot,proto3" json:"snapshot,omitempty"` + AppHash []byte `protobuf:"bytes,2,opt,name=app_hash,json=appHash,proto3" json:"app_hash,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestOfferSnapshot) Reset() { *m = RequestOfferSnapshot{} } +func (m *RequestOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*RequestOfferSnapshot) ProtoMessage() {} +func (*RequestOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{13} +} +func (m *RequestOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestOfferSnapshot.Merge(m, src) +} +func (m *RequestOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m *RequestOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_RequestOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestOfferSnapshot proto.InternalMessageInfo + +func (m *RequestOfferSnapshot) GetSnapshot() *Snapshot { + if m != nil { + return m.Snapshot + } + return nil +} + +func (m *RequestOfferSnapshot) GetAppHash() []byte { + if m != nil { + return m.AppHash + } + return nil +} + +// loads a snapshot chunk +type 
RequestLoadSnapshotChunk struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunk uint32 `protobuf:"varint,3,opt,name=chunk,proto3" json:"chunk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestLoadSnapshotChunk) Reset() { *m = RequestLoadSnapshotChunk{} } +func (m *RequestLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestLoadSnapshotChunk) ProtoMessage() {} +func (*RequestLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{14} +} +func (m *RequestLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestLoadSnapshotChunk.Merge(m, src) +} +func (m *RequestLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestLoadSnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestLoadSnapshotChunk proto.InternalMessageInfo + +func (m *RequestLoadSnapshotChunk) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *RequestLoadSnapshotChunk) GetChunk() uint32 { + if m != nil { + return m.Chunk + } + return 0 +} + +// Applies a snapshot chunk +type RequestApplySnapshotChunk struct { + Index uint32 `protobuf:"varint,1,opt,name=index,proto3" json:"index,omitempty"` + Chunk []byte `protobuf:"bytes,2,opt,name=chunk,proto3" json:"chunk,omitempty"` + Sender string `protobuf:"bytes,3,opt,name=sender,proto3" json:"sender,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *RequestApplySnapshotChunk) Reset() { *m = RequestApplySnapshotChunk{} } +func (m *RequestApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*RequestApplySnapshotChunk) ProtoMessage() {} +func (*RequestApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{15} +} +func (m *RequestApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *RequestApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_RequestApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *RequestApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_RequestApplySnapshotChunk.Merge(m, src) +} +func (m *RequestApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *RequestApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_RequestApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_RequestApplySnapshotChunk proto.InternalMessageInfo + +func (m *RequestApplySnapshotChunk) 
GetIndex() uint32 { + if m != nil { + return m.Index + } + return 0 +} + +func (m *RequestApplySnapshotChunk) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +func (m *RequestApplySnapshotChunk) GetSender() string { + if m != nil { + return m.Sender + } + return "" +} + type Response struct { // Types that are valid to be assigned to Value: // *Response_Exception @@ -895,6 +1239,10 @@ type Response struct { // *Response_DeliverTx // *Response_EndBlock // *Response_Commit + // *Response_ListSnapshots + // *Response_OfferSnapshot + // *Response_LoadSnapshotChunk + // *Response_ApplySnapshotChunk Value isResponse_Value `protobuf_oneof:"value"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -905,7 +1253,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{12} + return fileDescriptor_9f1eaa49c51fa1ac, []int{16} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -977,19 +1325,35 @@ type Response_EndBlock struct { type Response_Commit struct { Commit *ResponseCommit `protobuf:"bytes,12,opt,name=commit,proto3,oneof" json:"commit,omitempty"` } +type Response_ListSnapshots struct { + ListSnapshots *ResponseListSnapshots `protobuf:"bytes,13,opt,name=list_snapshots,json=listSnapshots,proto3,oneof" json:"list_snapshots,omitempty"` +} +type Response_OfferSnapshot struct { + OfferSnapshot *ResponseOfferSnapshot `protobuf:"bytes,14,opt,name=offer_snapshot,json=offerSnapshot,proto3,oneof" json:"offer_snapshot,omitempty"` +} +type Response_LoadSnapshotChunk struct { + LoadSnapshotChunk *ResponseLoadSnapshotChunk `protobuf:"bytes,15,opt,name=load_snapshot_chunk,json=loadSnapshotChunk,proto3,oneof" json:"load_snapshot_chunk,omitempty"` +} +type Response_ApplySnapshotChunk struct { + ApplySnapshotChunk *ResponseApplySnapshotChunk `protobuf:"bytes,16,opt,name=apply_snapshot_chunk,json=applySnapshotChunk,proto3,oneof" json:"apply_snapshot_chunk,omitempty"` +} -func (*Response_Exception) isResponse_Value() {} -func (*Response_Echo) isResponse_Value() {} -func (*Response_Flush) isResponse_Value() {} -func (*Response_Info) isResponse_Value() {} -func (*Response_SetOption) isResponse_Value() {} -func (*Response_InitChain) isResponse_Value() {} -func (*Response_Query) isResponse_Value() {} -func (*Response_BeginBlock) isResponse_Value() {} -func (*Response_CheckTx) isResponse_Value() {} -func (*Response_DeliverTx) isResponse_Value() {} -func (*Response_EndBlock) isResponse_Value() {} -func (*Response_Commit) isResponse_Value() {} +func (*Response_Exception) isResponse_Value() {} +func (*Response_Echo) isResponse_Value() {} +func (*Response_Flush) isResponse_Value() {} +func (*Response_Info) isResponse_Value() {} +func (*Response_SetOption) isResponse_Value() {} +func (*Response_InitChain) isResponse_Value() {} +func (*Response_Query) isResponse_Value() {} +func (*Response_BeginBlock) isResponse_Value() {} +func (*Response_CheckTx) isResponse_Value() {} +func (*Response_DeliverTx) isResponse_Value() {} +func (*Response_EndBlock) isResponse_Value() {} +func (*Response_Commit) isResponse_Value() {} +func (*Response_ListSnapshots) isResponse_Value() {} +func (*Response_OfferSnapshot) isResponse_Value() {} +func (*Response_LoadSnapshotChunk) isResponse_Value() {} +func (*Response_ApplySnapshotChunk) isResponse_Value() {} func (m 
*Response) GetValue() isResponse_Value { if m != nil { @@ -1082,6 +1446,34 @@ func (m *Response) GetCommit() *ResponseCommit { return nil } +func (m *Response) GetListSnapshots() *ResponseListSnapshots { + if x, ok := m.GetValue().(*Response_ListSnapshots); ok { + return x.ListSnapshots + } + return nil +} + +func (m *Response) GetOfferSnapshot() *ResponseOfferSnapshot { + if x, ok := m.GetValue().(*Response_OfferSnapshot); ok { + return x.OfferSnapshot + } + return nil +} + +func (m *Response) GetLoadSnapshotChunk() *ResponseLoadSnapshotChunk { + if x, ok := m.GetValue().(*Response_LoadSnapshotChunk); ok { + return x.LoadSnapshotChunk + } + return nil +} + +func (m *Response) GetApplySnapshotChunk() *ResponseApplySnapshotChunk { + if x, ok := m.GetValue().(*Response_ApplySnapshotChunk); ok { + return x.ApplySnapshotChunk + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. func (*Response) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1097,6 +1489,10 @@ func (*Response) XXX_OneofWrappers() []interface{} { (*Response_DeliverTx)(nil), (*Response_EndBlock)(nil), (*Response_Commit)(nil), + (*Response_ListSnapshots)(nil), + (*Response_OfferSnapshot)(nil), + (*Response_LoadSnapshotChunk)(nil), + (*Response_ApplySnapshotChunk)(nil), } } @@ -1112,7 +1508,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{13} + return fileDescriptor_9f1eaa49c51fa1ac, []int{17} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1159,7 +1555,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{14} + return fileDescriptor_9f1eaa49c51fa1ac, []int{18} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1205,7 +1601,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{15} + return fileDescriptor_9f1eaa49c51fa1ac, []int{19} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1249,7 +1645,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{16} + return fileDescriptor_9f1eaa49c51fa1ac, []int{20} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1328,7 +1724,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{17} + return fileDescriptor_9f1eaa49c51fa1ac, []int{21} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1390,7 +1786,7 @@ func (m 
*ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{18} + return fileDescriptor_9f1eaa49c51fa1ac, []int{22} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1453,7 +1849,7 @@ func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{19} + return fileDescriptor_9f1eaa49c51fa1ac, []int{23} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1556,7 +1952,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{20} + return fileDescriptor_9f1eaa49c51fa1ac, []int{24} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1610,7 +2006,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{21} + return fileDescriptor_9f1eaa49c51fa1ac, []int{25} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1713,7 +2109,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{22} + return fileDescriptor_9f1eaa49c51fa1ac, []int{26} } func (m *ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1811,7 +2207,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{23} + return fileDescriptor_9f1eaa49c51fa1ac, []int{27} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1874,7 +2270,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{24} + return fileDescriptor_9f1eaa49c51fa1ac, []int{28} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1917,6 +2313,210 @@ func (m *ResponseCommit) GetRetainHeight() int64 { return 0 } +type ResponseListSnapshots struct { + Snapshots []*Snapshot `protobuf:"bytes,1,rep,name=snapshots,proto3" json:"snapshots,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseListSnapshots) Reset() { *m = ResponseListSnapshots{} } +func (m 
*ResponseListSnapshots) String() string { return proto.CompactTextString(m) } +func (*ResponseListSnapshots) ProtoMessage() {} +func (*ResponseListSnapshots) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{29} +} +func (m *ResponseListSnapshots) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseListSnapshots) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseListSnapshots.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseListSnapshots) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseListSnapshots.Merge(m, src) +} +func (m *ResponseListSnapshots) XXX_Size() int { + return m.Size() +} +func (m *ResponseListSnapshots) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseListSnapshots.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseListSnapshots proto.InternalMessageInfo + +func (m *ResponseListSnapshots) GetSnapshots() []*Snapshot { + if m != nil { + return m.Snapshots + } + return nil +} + +type ResponseOfferSnapshot struct { + Result ResponseOfferSnapshot_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.types.ResponseOfferSnapshot_Result" json:"result,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseOfferSnapshot) Reset() { *m = ResponseOfferSnapshot{} } +func (m *ResponseOfferSnapshot) String() string { return proto.CompactTextString(m) } +func (*ResponseOfferSnapshot) ProtoMessage() {} +func (*ResponseOfferSnapshot) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{30} +} +func (m *ResponseOfferSnapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseOfferSnapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseOfferSnapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseOfferSnapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseOfferSnapshot.Merge(m, src) +} +func (m *ResponseOfferSnapshot) XXX_Size() int { + return m.Size() +} +func (m *ResponseOfferSnapshot) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseOfferSnapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseOfferSnapshot proto.InternalMessageInfo + +func (m *ResponseOfferSnapshot) GetResult() ResponseOfferSnapshot_Result { + if m != nil { + return m.Result + } + return ResponseOfferSnapshot_accept +} + +type ResponseLoadSnapshotChunk struct { + Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseLoadSnapshotChunk) Reset() { *m = ResponseLoadSnapshotChunk{} } +func (m *ResponseLoadSnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseLoadSnapshotChunk) ProtoMessage() {} +func (*ResponseLoadSnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{31} +} +func (m *ResponseLoadSnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseLoadSnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if 
deterministic { + return xxx_messageInfo_ResponseLoadSnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseLoadSnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseLoadSnapshotChunk.Merge(m, src) +} +func (m *ResponseLoadSnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseLoadSnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseLoadSnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseLoadSnapshotChunk proto.InternalMessageInfo + +func (m *ResponseLoadSnapshotChunk) GetChunk() []byte { + if m != nil { + return m.Chunk + } + return nil +} + +type ResponseApplySnapshotChunk struct { + Result ResponseApplySnapshotChunk_Result `protobuf:"varint,1,opt,name=result,proto3,enum=tendermint.abci.types.ResponseApplySnapshotChunk_Result" json:"result,omitempty"` + RefetchChunks []uint32 `protobuf:"varint,2,rep,packed,name=refetch_chunks,json=refetchChunks,proto3" json:"refetch_chunks,omitempty"` + RejectSenders []string `protobuf:"bytes,3,rep,name=reject_senders,json=rejectSenders,proto3" json:"reject_senders,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ResponseApplySnapshotChunk) Reset() { *m = ResponseApplySnapshotChunk{} } +func (m *ResponseApplySnapshotChunk) String() string { return proto.CompactTextString(m) } +func (*ResponseApplySnapshotChunk) ProtoMessage() {} +func (*ResponseApplySnapshotChunk) Descriptor() ([]byte, []int) { + return fileDescriptor_9f1eaa49c51fa1ac, []int{32} +} +func (m *ResponseApplySnapshotChunk) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResponseApplySnapshotChunk) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResponseApplySnapshotChunk.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResponseApplySnapshotChunk) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResponseApplySnapshotChunk.Merge(m, src) +} +func (m *ResponseApplySnapshotChunk) XXX_Size() int { + return m.Size() +} +func (m *ResponseApplySnapshotChunk) XXX_DiscardUnknown() { + xxx_messageInfo_ResponseApplySnapshotChunk.DiscardUnknown(m) +} + +var xxx_messageInfo_ResponseApplySnapshotChunk proto.InternalMessageInfo + +func (m *ResponseApplySnapshotChunk) GetResult() ResponseApplySnapshotChunk_Result { + if m != nil { + return m.Result + } + return ResponseApplySnapshotChunk_accept +} + +func (m *ResponseApplySnapshotChunk) GetRefetchChunks() []uint32 { + if m != nil { + return m.RefetchChunks + } + return nil +} + +func (m *ResponseApplySnapshotChunk) GetRejectSenders() []string { + if m != nil { + return m.RejectSenders + } + return nil +} + // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { @@ -1932,7 +2532,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{25} + return fileDescriptor_9f1eaa49c51fa1ac, []int{33} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1997,7 +2597,7 @@ func (m *BlockParams) Reset() { *m = BlockParams{} } func (m *BlockParams) String() string { return proto.CompactTextString(m) } func (*BlockParams) ProtoMessage() {} func (*BlockParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{26} + return fileDescriptor_9f1eaa49c51fa1ac, []int{34} } func (m *BlockParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2053,7 +2653,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{27} + return fileDescriptor_9f1eaa49c51fa1ac, []int{35} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2108,7 +2708,7 @@ func (m *ValidatorParams) Reset() { *m = ValidatorParams{} } func (m *ValidatorParams) String() string { return proto.CompactTextString(m) } func (*ValidatorParams) ProtoMessage() {} func (*ValidatorParams) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{28} + return fileDescriptor_9f1eaa49c51fa1ac, []int{36} } func (m *ValidatorParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2156,7 +2756,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{29} + return fileDescriptor_9f1eaa49c51fa1ac, []int{37} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2213,7 +2813,7 @@ func (m *EventAttribute) Reset() { *m = EventAttribute{} } func (m *EventAttribute) String() string { return proto.CompactTextString(m) } func (*EventAttribute) ProtoMessage() {} func (*EventAttribute) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{30} + return fileDescriptor_9f1eaa49c51fa1ac, []int{38} } func (m *EventAttribute) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2275,7 +2875,7 @@ func (m *Event) Reset() { *m = Event{} } func (m *Event) String() string { return proto.CompactTextString(m) } func (*Event) ProtoMessage() {} func (*Event) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{31} + return fileDescriptor_9f1eaa49c51fa1ac, []int{39} } func (m *Event) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2347,7 +2947,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{32} + return fileDescriptor_9f1eaa49c51fa1ac, []int{40} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2486,7 +3086,7 @@ func (m *Version) Reset() { *m = Version{} } func (m *Version) String() string { return proto.CompactTextString(m) } func (*Version) ProtoMessage() {} func (*Version) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{33} + return fileDescriptor_9f1eaa49c51fa1ac, []int{41} } func (m *Version) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2541,7 +3141,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) 
Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{34} + return fileDescriptor_9f1eaa49c51fa1ac, []int{42} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2596,7 +3196,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{35} + return fileDescriptor_9f1eaa49c51fa1ac, []int{43} } func (m *PartSetHeader) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2653,7 +3253,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{36} + return fileDescriptor_9f1eaa49c51fa1ac, []int{44} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2709,7 +3309,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{37} + return fileDescriptor_9f1eaa49c51fa1ac, []int{45} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2765,7 +3365,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{38} + return fileDescriptor_9f1eaa49c51fa1ac, []int{46} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2820,7 +3420,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{39} + return fileDescriptor_9f1eaa49c51fa1ac, []int{47} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2878,7 +3478,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return fileDescriptor_9f1eaa49c51fa1ac, []int{40} + return fileDescriptor_9f1eaa49c51fa1ac, []int{48} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2942,9 +3542,92 @@ func (m *Evidence) GetTotalVotingPower() int64 { return 0 } +type Snapshot struct { + Height uint64 `protobuf:"varint,1,opt,name=height,proto3" json:"height,omitempty"` + Format uint32 `protobuf:"varint,2,opt,name=format,proto3" json:"format,omitempty"` + Chunks uint32 `protobuf:"varint,3,opt,name=chunks,proto3" json:"chunks,omitempty"` + Hash []byte `protobuf:"bytes,4,opt,name=hash,proto3" json:"hash,omitempty"` + Metadata []byte `protobuf:"bytes,5,opt,name=metadata,proto3" json:"metadata,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Snapshot) Reset() { *m = Snapshot{} } +func (m *Snapshot) String() string { return proto.CompactTextString(m) } +func (*Snapshot) ProtoMessage() {} +func (*Snapshot) Descriptor() ([]byte, []int) { 
+ return fileDescriptor_9f1eaa49c51fa1ac, []int{49} +} +func (m *Snapshot) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Snapshot) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Snapshot.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Snapshot) XXX_Merge(src proto.Message) { + xxx_messageInfo_Snapshot.Merge(m, src) +} +func (m *Snapshot) XXX_Size() int { + return m.Size() +} +func (m *Snapshot) XXX_DiscardUnknown() { + xxx_messageInfo_Snapshot.DiscardUnknown(m) +} + +var xxx_messageInfo_Snapshot proto.InternalMessageInfo + +func (m *Snapshot) GetHeight() uint64 { + if m != nil { + return m.Height + } + return 0 +} + +func (m *Snapshot) GetFormat() uint32 { + if m != nil { + return m.Format + } + return 0 +} + +func (m *Snapshot) GetChunks() uint32 { + if m != nil { + return m.Chunks + } + return 0 +} + +func (m *Snapshot) GetHash() []byte { + if m != nil { + return m.Hash + } + return nil +} + +func (m *Snapshot) GetMetadata() []byte { + if m != nil { + return m.Metadata + } + return nil +} + func init() { proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) golang_proto.RegisterEnum("tendermint.abci.types.CheckTxType", CheckTxType_name, CheckTxType_value) + proto.RegisterEnum("tendermint.abci.types.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) + golang_proto.RegisterEnum("tendermint.abci.types.ResponseOfferSnapshot_Result", ResponseOfferSnapshot_Result_name, ResponseOfferSnapshot_Result_value) + proto.RegisterEnum("tendermint.abci.types.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) + golang_proto.RegisterEnum("tendermint.abci.types.ResponseApplySnapshotChunk_Result", ResponseApplySnapshotChunk_Result_name, ResponseApplySnapshotChunk_Result_value) proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") golang_proto.RegisterType((*Request)(nil), "tendermint.abci.types.Request") proto.RegisterType((*RequestEcho)(nil), "tendermint.abci.types.RequestEcho") @@ -2969,6 +3652,14 @@ func init() { golang_proto.RegisterType((*RequestEndBlock)(nil), "tendermint.abci.types.RequestEndBlock") proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.types.RequestCommit") golang_proto.RegisterType((*RequestCommit)(nil), "tendermint.abci.types.RequestCommit") + proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.types.RequestListSnapshots") + golang_proto.RegisterType((*RequestListSnapshots)(nil), "tendermint.abci.types.RequestListSnapshots") + proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.types.RequestOfferSnapshot") + golang_proto.RegisterType((*RequestOfferSnapshot)(nil), "tendermint.abci.types.RequestOfferSnapshot") + proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.types.RequestLoadSnapshotChunk") + golang_proto.RegisterType((*RequestLoadSnapshotChunk)(nil), "tendermint.abci.types.RequestLoadSnapshotChunk") + proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.types.RequestApplySnapshotChunk") + golang_proto.RegisterType((*RequestApplySnapshotChunk)(nil), "tendermint.abci.types.RequestApplySnapshotChunk") proto.RegisterType((*Response)(nil), "tendermint.abci.types.Response") golang_proto.RegisterType((*Response)(nil), 
"tendermint.abci.types.Response") proto.RegisterType((*ResponseException)(nil), "tendermint.abci.types.ResponseException") @@ -2995,6 +3686,14 @@ func init() { golang_proto.RegisterType((*ResponseEndBlock)(nil), "tendermint.abci.types.ResponseEndBlock") proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.types.ResponseCommit") golang_proto.RegisterType((*ResponseCommit)(nil), "tendermint.abci.types.ResponseCommit") + proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.types.ResponseListSnapshots") + golang_proto.RegisterType((*ResponseListSnapshots)(nil), "tendermint.abci.types.ResponseListSnapshots") + proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.types.ResponseOfferSnapshot") + golang_proto.RegisterType((*ResponseOfferSnapshot)(nil), "tendermint.abci.types.ResponseOfferSnapshot") + proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.types.ResponseLoadSnapshotChunk") + golang_proto.RegisterType((*ResponseLoadSnapshotChunk)(nil), "tendermint.abci.types.ResponseLoadSnapshotChunk") + proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.types.ResponseApplySnapshotChunk") + golang_proto.RegisterType((*ResponseApplySnapshotChunk)(nil), "tendermint.abci.types.ResponseApplySnapshotChunk") proto.RegisterType((*ConsensusParams)(nil), "tendermint.abci.types.ConsensusParams") golang_proto.RegisterType((*ConsensusParams)(nil), "tendermint.abci.types.ConsensusParams") proto.RegisterType((*BlockParams)(nil), "tendermint.abci.types.BlockParams") @@ -3027,163 +3726,198 @@ func init() { golang_proto.RegisterType((*PubKey)(nil), "tendermint.abci.types.PubKey") proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") golang_proto.RegisterType((*Evidence)(nil), "tendermint.abci.types.Evidence") + proto.RegisterType((*Snapshot)(nil), "tendermint.abci.types.Snapshot") + golang_proto.RegisterType((*Snapshot)(nil), "tendermint.abci.types.Snapshot") } func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } var fileDescriptor_9f1eaa49c51fa1ac = []byte{ - // 2397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x3d, 0x8c, 0x1b, 0xc7, - 0xf5, 0xbf, 0x25, 0x79, 0xfc, 0x78, 0xbc, 0x3b, 0xd2, 0x63, 0xd9, 0xa6, 0xf8, 0x97, 0xef, 0x84, - 0x3d, 0x4b, 0x3a, 0xd9, 0xfe, 0xdf, 0x39, 0x17, 0x38, 0xb0, 0x22, 0xc1, 0xc1, 0xf1, 0x24, 0x87, - 0x84, 0x25, 0x59, 0x5e, 0x7d, 0x44, 0x49, 0x00, 0x2f, 0x86, 0xdc, 0x11, 0xb9, 0x10, 0xb9, 0xbb, - 0xde, 0x1d, 0x9e, 0xc8, 0x20, 0x45, 0xba, 0x20, 0x40, 0x8a, 0x34, 0x01, 0xd2, 0xa4, 0x4f, 0x99, - 0x22, 0x85, 0xcb, 0x94, 0x2e, 0x52, 0xa4, 0x48, 0xad, 0x24, 0x97, 0x54, 0x81, 0xcb, 0x20, 0x48, - 0x19, 0xcc, 0x9b, 0xd9, 0x2f, 0x1e, 0x3f, 0x56, 0x8e, 0xba, 0x34, 0xe4, 0xce, 0xcc, 0x7b, 0x6f, - 0x66, 0xde, 0xcc, 0x7b, 0xbf, 0xf7, 0xde, 0xc0, 0xeb, 0xb4, 0xdb, 0xb3, 0x0f, 0xf8, 0xd4, 0x63, - 0x81, 0xfc, 0xdd, 0xf7, 0x7c, 0x97, 0xbb, 0xe4, 0x35, 0xce, 0x1c, 0x8b, 0xf9, 0x23, 0xdb, 0xe1, - 0xfb, 0x82, 0x64, 0x1f, 0x07, 0x9b, 0x97, 0xf9, 0xc0, 0xf6, 0x2d, 0xd3, 0xa3, 0x3e, 0x9f, 0x1e, - 0x20, 0xe5, 0x41, 0xdf, 0xed, 0xbb, 0xf1, 0x97, 0x64, 0x6f, 0x36, 0x7b, 0xfe, 0xd4, 0xe3, 0xee, - 0xc1, 0x88, 0xf9, 0x4f, 0x87, 0x4c, 0xfd, 0xa9, 0xb1, 0x9d, 0xbe, 0xeb, 0xf6, 0x87, 0x4c, 0xb2, - 0x77, 0xc7, 0x4f, 0x0e, 0xb8, 0x3d, 0x62, 0x01, 0xa7, 0x23, 0x4f, 0x11, 0x6c, 0xcf, 0x12, 0x58, - 0x63, 0x9f, 0x72, 0xdb, 0x75, 
0xe4, 0xb8, 0xfe, 0xaf, 0x75, 0x28, 0x19, 0xec, 0xf3, 0x31, 0x0b, - 0x38, 0xf9, 0x00, 0x0a, 0xac, 0x37, 0x70, 0x1b, 0xb9, 0x8b, 0xda, 0x5e, 0xf5, 0x50, 0xdf, 0x9f, - 0xbb, 0xec, 0x7d, 0x45, 0x7d, 0xab, 0x37, 0x70, 0xdb, 0x6b, 0x06, 0x72, 0x90, 0xeb, 0xb0, 0xfe, - 0x64, 0x38, 0x0e, 0x06, 0x8d, 0x3c, 0xb2, 0xee, 0x2e, 0x67, 0xfd, 0x48, 0x90, 0xb6, 0xd7, 0x0c, - 0xc9, 0x23, 0xa6, 0xb5, 0x9d, 0x27, 0x6e, 0xa3, 0x90, 0x65, 0xda, 0x8e, 0xf3, 0x04, 0xa7, 0x15, - 0x1c, 0xa4, 0x0d, 0x10, 0x30, 0x6e, 0xba, 0x9e, 0xd8, 0x50, 0x63, 0x1d, 0xf9, 0xaf, 0x2c, 0xe7, - 0xbf, 0xcf, 0xf8, 0x27, 0x48, 0xde, 0x5e, 0x33, 0x2a, 0x41, 0xd8, 0x10, 0x92, 0x6c, 0xc7, 0xe6, - 0x66, 0x6f, 0x40, 0x6d, 0xa7, 0x51, 0xcc, 0x22, 0xa9, 0xe3, 0xd8, 0xfc, 0x58, 0x90, 0x0b, 0x49, - 0x76, 0xd8, 0x10, 0xaa, 0xf8, 0x7c, 0xcc, 0xfc, 0x69, 0xa3, 0x94, 0x45, 0x15, 0x9f, 0x0a, 0x52, - 0xa1, 0x0a, 0xe4, 0x21, 0x1f, 0x43, 0xb5, 0xcb, 0xfa, 0xb6, 0x63, 0x76, 0x87, 0x6e, 0xef, 0x69, - 0xa3, 0x8c, 0x22, 0xf6, 0x96, 0x8b, 0x68, 0x09, 0x86, 0x96, 0xa0, 0x6f, 0xaf, 0x19, 0xd0, 0x8d, - 0x5a, 0xa4, 0x05, 0xe5, 0xde, 0x80, 0xf5, 0x9e, 0x9a, 0x7c, 0xd2, 0xa8, 0xa0, 0xa4, 0x4b, 0xcb, - 0x25, 0x1d, 0x0b, 0xea, 0x07, 0x93, 0xf6, 0x9a, 0x51, 0xea, 0xc9, 0x4f, 0xa1, 0x17, 0x8b, 0x0d, - 0xed, 0x13, 0xe6, 0x0b, 0x29, 0xaf, 0x66, 0xd1, 0xcb, 0x4d, 0x49, 0x8f, 0x72, 0x2a, 0x56, 0xd8, - 0x20, 0xb7, 0xa0, 0xc2, 0x1c, 0x4b, 0x6d, 0xac, 0x8a, 0x82, 0x2e, 0xaf, 0xb8, 0x61, 0x8e, 0x15, - 0x6e, 0xab, 0xcc, 0xd4, 0x37, 0xf9, 0x10, 0x8a, 0x3d, 0x77, 0x34, 0xb2, 0x79, 0x63, 0x03, 0x65, - 0xbc, 0xb5, 0x62, 0x4b, 0x48, 0xdb, 0x5e, 0x33, 0x14, 0x57, 0xab, 0x04, 0xeb, 0x27, 0x74, 0x38, - 0x66, 0xfa, 0x15, 0xa8, 0x26, 0x6e, 0x32, 0x69, 0x40, 0x69, 0xc4, 0x82, 0x80, 0xf6, 0x59, 0x43, - 0xbb, 0xa8, 0xed, 0x55, 0x8c, 0xb0, 0xa9, 0x6f, 0xc1, 0x46, 0xf2, 0xde, 0xea, 0xa3, 0x88, 0x51, - 0xdc, 0x45, 0xc1, 0x78, 0xc2, 0xfc, 0x40, 0x5c, 0x40, 0xc5, 0xa8, 0x9a, 0x64, 0x17, 0x36, 0x71, - 0xb7, 0x66, 0x38, 0x2e, 0xec, 0xaa, 0x60, 0x6c, 0x60, 0xe7, 0x23, 0x45, 0xb4, 0x03, 0x55, 0xef, - 0xd0, 0x8b, 0x48, 0xf2, 0x48, 0x02, 0xde, 0xa1, 0xa7, 0x08, 0xf4, 0x6f, 0x43, 0x7d, 0xf6, 0xea, - 0x92, 0x3a, 0xe4, 0x9f, 0xb2, 0xa9, 0x9a, 0x4f, 0x7c, 0x92, 0x73, 0x6a, 0x5b, 0x38, 0x47, 0xc5, - 0x50, 0x7b, 0xfc, 0x6d, 0x2e, 0x62, 0x8e, 0x6e, 0xab, 0x30, 0x37, 0xe1, 0x24, 0x90, 0xbb, 0x7a, - 0xd8, 0xdc, 0x97, 0x0e, 0x62, 0x3f, 0x74, 0x10, 0xfb, 0x0f, 0x42, 0x0f, 0xd2, 0x2a, 0x7f, 0xf9, - 0x7c, 0x67, 0xed, 0x17, 0x7f, 0xde, 0xd1, 0x0c, 0xe4, 0x20, 0xe7, 0xc5, 0x85, 0xa2, 0xb6, 0x63, - 0xda, 0x96, 0x9a, 0xa7, 0x84, 0xed, 0x8e, 0x45, 0x3e, 0x85, 0x7a, 0xcf, 0x75, 0x02, 0xe6, 0x04, - 0xe3, 0x40, 0x78, 0x34, 0x3a, 0x0a, 0x94, 0x2f, 0x58, 0x74, 0xc8, 0xc7, 0x21, 0xf9, 0x3d, 0xa4, - 0x36, 0x6a, 0xbd, 0x74, 0x07, 0xb9, 0x0d, 0x70, 0x42, 0x87, 0xb6, 0x45, 0xb9, 0xeb, 0x07, 0x8d, - 0xc2, 0xc5, 0xfc, 0x12, 0x61, 0x8f, 0x42, 0xc2, 0x87, 0x9e, 0x45, 0x39, 0x6b, 0x15, 0xc4, 0xca, - 0x8d, 0x04, 0x3f, 0xb9, 0x0c, 0x35, 0xea, 0x79, 0x66, 0xc0, 0x29, 0x67, 0x66, 0x77, 0xca, 0x59, - 0x80, 0xfe, 0x62, 0xc3, 0xd8, 0xa4, 0x9e, 0x77, 0x5f, 0xf4, 0xb6, 0x44, 0xa7, 0x6e, 0x45, 0xa7, - 0x8d, 0xa6, 0x49, 0x08, 0x14, 0x2c, 0xca, 0x29, 0x6a, 0x6b, 0xc3, 0xc0, 0x6f, 0xd1, 0xe7, 0x51, - 0x3e, 0x50, 0x3a, 0xc0, 0x6f, 0xf2, 0x3a, 0x14, 0x07, 0xcc, 0xee, 0x0f, 0x38, 0x6e, 0x3b, 0x6f, - 0xa8, 0x96, 0x38, 0x18, 0xcf, 0x77, 0x4f, 0x18, 0x7a, 0xb7, 0xb2, 0x21, 0x1b, 0xfa, 0x2f, 0x73, - 0xf0, 0xca, 0x19, 0xf3, 0x15, 0x72, 0x07, 0x34, 0x18, 0x84, 0x73, 0x89, 0x6f, 0x72, 0x5d, 0xc8, - 0xa5, 0x16, 0xf3, 0x95, 0x57, 0x7e, 0x73, 0x81, 0x06, 
0xda, 0x48, 0xa4, 0x36, 0xae, 0x58, 0xc8, - 0x43, 0xa8, 0x0f, 0x69, 0xc0, 0x4d, 0x79, 0xf7, 0x4d, 0xf4, 0xb2, 0xf9, 0xa5, 0x9e, 0xe0, 0x36, - 0x0d, 0x6d, 0x46, 0x5c, 0x6e, 0x25, 0x6e, 0x6b, 0x98, 0xea, 0x25, 0x8f, 0xe1, 0x5c, 0x77, 0xfa, - 0x23, 0xea, 0x70, 0xdb, 0x61, 0xe6, 0x99, 0x33, 0xda, 0x59, 0x20, 0xfa, 0xd6, 0x89, 0x6d, 0x31, - 0xa7, 0x17, 0x1e, 0xce, 0xab, 0x91, 0x88, 0xe8, 0xf0, 0x02, 0xfd, 0x31, 0x6c, 0xa5, 0x7d, 0x11, - 0xd9, 0x82, 0x1c, 0x9f, 0x28, 0x8d, 0xe4, 0xf8, 0x84, 0x7c, 0x0b, 0x0a, 0x42, 0x1c, 0x6a, 0x63, - 0x6b, 0x21, 0x58, 0x28, 0xee, 0x07, 0x53, 0x8f, 0x19, 0x48, 0xaf, 0xeb, 0x91, 0x25, 0x44, 0xfe, - 0x69, 0x56, 0xb6, 0x7e, 0x15, 0x6a, 0x33, 0xae, 0x27, 0x71, 0xac, 0x5a, 0xf2, 0x58, 0xf5, 0x1a, - 0x6c, 0xa6, 0x3c, 0x8c, 0xfe, 0x87, 0x22, 0x94, 0x0d, 0x16, 0x78, 0xe2, 0x12, 0x93, 0x36, 0x54, - 0xd8, 0xa4, 0xc7, 0x24, 0x2c, 0x69, 0x2b, 0x9c, 0xb8, 0xe4, 0xb9, 0x15, 0xd2, 0x0b, 0xaf, 0x19, - 0x31, 0x93, 0x6b, 0x29, 0x48, 0xde, 0x5d, 0x25, 0x24, 0x89, 0xc9, 0x37, 0xd2, 0x98, 0xfc, 0xd6, - 0x0a, 0xde, 0x19, 0x50, 0xbe, 0x96, 0x02, 0xe5, 0x55, 0x13, 0xa7, 0x50, 0xb9, 0x33, 0x07, 0x95, - 0x57, 0x6d, 0x7f, 0x01, 0x2c, 0x77, 0xe6, 0xc0, 0xf2, 0xde, 0xca, 0xb5, 0xcc, 0xc5, 0xe5, 0x1b, - 0x69, 0x5c, 0x5e, 0xa5, 0x8e, 0x19, 0x60, 0xbe, 0x3d, 0x0f, 0x98, 0xaf, 0xae, 0x90, 0xb1, 0x10, - 0x99, 0x8f, 0xcf, 0x20, 0xf3, 0xe5, 0x15, 0xa2, 0xe6, 0x40, 0x73, 0x27, 0x05, 0xcd, 0x90, 0x49, - 0x37, 0x0b, 0xb0, 0xf9, 0xa3, 0xb3, 0xd8, 0x7c, 0x65, 0xd5, 0x55, 0x9b, 0x07, 0xce, 0xdf, 0x99, - 0x01, 0xe7, 0x4b, 0xab, 0x76, 0xb5, 0x10, 0x9d, 0xaf, 0x0a, 0xff, 0x38, 0x63, 0x19, 0xc2, 0x97, - 0x32, 0xdf, 0x77, 0x7d, 0x05, 0x7c, 0xb2, 0xa1, 0xef, 0x09, 0x8f, 0x1d, 0xdf, 0xff, 0x25, 0x48, - 0x8e, 0x46, 0x9b, 0xb8, 0xed, 0xfa, 0x17, 0x5a, 0xcc, 0x8b, 0x9e, 0x2d, 0xe9, 0xed, 0x2b, 0xca, - 0xdb, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x1d, 0xa8, 0x0a, 0x4c, 0x99, 0xc1, 0x6e, 0xea, 0x85, - 0xd8, 0x4d, 0xde, 0x86, 0x57, 0xd0, 0xff, 0xca, 0x30, 0x40, 0x39, 0x92, 0x02, 0x3a, 0x92, 0x9a, - 0x18, 0x90, 0x1a, 0x94, 0x40, 0xf1, 0xff, 0xf0, 0x6a, 0x82, 0x56, 0xc8, 0x45, 0x2c, 0x90, 0x20, - 0x55, 0x8f, 0xa8, 0x8f, 0x3c, 0xaf, 0x4d, 0x83, 0x81, 0x7e, 0x27, 0x56, 0x50, 0x1c, 0x17, 0x10, - 0x28, 0xf4, 0x5c, 0x4b, 0xee, 0x7b, 0xd3, 0xc0, 0x6f, 0x11, 0x2b, 0x0c, 0xdd, 0x3e, 0x2e, 0xae, - 0x62, 0x88, 0x4f, 0x41, 0x15, 0x99, 0x76, 0x45, 0xda, 0xac, 0xfe, 0x3b, 0x2d, 0x96, 0x17, 0x87, - 0x0a, 0xf3, 0x50, 0x5d, 0x7b, 0x99, 0xa8, 0x9e, 0xfb, 0xef, 0x50, 0x5d, 0xff, 0xa7, 0x16, 0x1f, - 0x69, 0x84, 0xd7, 0x5f, 0x4f, 0x05, 0xe2, 0x76, 0xd9, 0x8e, 0xc5, 0x26, 0xa8, 0xf2, 0xbc, 0x21, - 0x1b, 0x61, 0xa8, 0x55, 0xc4, 0x63, 0x48, 0x87, 0x5a, 0x25, 0xec, 0x93, 0x0d, 0xf2, 0x3e, 0xe2, - 0xbc, 0xfb, 0x44, 0xb9, 0x86, 0x14, 0x08, 0xca, 0xfc, 0x6d, 0x5f, 0x25, 0x6e, 0xf7, 0x04, 0x99, - 0x21, 0xa9, 0x13, 0xf8, 0x52, 0x49, 0x85, 0x0d, 0x17, 0xa0, 0x22, 0x96, 0x1e, 0x78, 0xb4, 0xc7, - 0xd0, 0xb6, 0x2b, 0x46, 0xdc, 0xa1, 0x5b, 0x40, 0xce, 0xfa, 0x18, 0x72, 0x17, 0x8a, 0xec, 0x84, - 0x39, 0x5c, 0x9c, 0x91, 0x50, 0xeb, 0x85, 0x85, 0x40, 0xcc, 0x1c, 0xde, 0x6a, 0x08, 0x65, 0xfe, - 0xe3, 0xf9, 0x4e, 0x5d, 0xf2, 0xbc, 0xeb, 0x8e, 0x6c, 0xce, 0x46, 0x1e, 0x9f, 0x1a, 0x4a, 0x8a, - 0xfe, 0xd3, 0x9c, 0xc0, 0xc3, 0x94, 0xff, 0x99, 0xab, 0xde, 0xd0, 0x68, 0x72, 0x89, 0x10, 0x29, - 0x9b, 0xca, 0xdf, 0x04, 0xe8, 0xd3, 0xc0, 0x7c, 0x46, 0x1d, 0xce, 0x2c, 0xa5, 0xf7, 0x4a, 0x9f, - 0x06, 0xdf, 0xc3, 0x0e, 0x11, 0x6f, 0x8a, 0xe1, 0x71, 0xc0, 0x2c, 0x3c, 0x80, 0xbc, 0x51, 0xea, - 0xd3, 0xe0, 0x61, 0xc0, 0xac, 0xc4, 0x5e, 0x4b, 0x2f, 0x63, 0xaf, 0x69, 0x7d, 
0x97, 0x67, 0xf5, - 0xfd, 0xb3, 0x5c, 0x6c, 0x1d, 0x71, 0xf8, 0xf0, 0xbf, 0xa9, 0x8b, 0x5f, 0x63, 0x4e, 0x91, 0x06, - 0x01, 0xf2, 0x7d, 0x78, 0x25, 0xb2, 0x4a, 0x73, 0x8c, 0xd6, 0x1a, 0xde, 0xc2, 0x17, 0x33, 0xee, - 0xfa, 0x49, 0xba, 0x3b, 0x20, 0x9f, 0xc1, 0x1b, 0x33, 0x3e, 0x28, 0x9a, 0x20, 0xf7, 0x42, 0xae, - 0xe8, 0xb5, 0xb4, 0x2b, 0x0a, 0xe5, 0xc7, 0xda, 0xcb, 0xbf, 0x14, 0xab, 0xe9, 0x88, 0x10, 0x36, - 0x09, 0x6f, 0x73, 0xef, 0xc4, 0x2e, 0x6c, 0xfa, 0x8c, 0x8b, 0x5c, 0x2a, 0x95, 0x35, 0x6c, 0xc8, - 0x4e, 0x09, 0x09, 0xfa, 0x9f, 0x34, 0xa8, 0xcd, 0xec, 0x82, 0x7c, 0x00, 0xeb, 0x12, 0xa6, 0xb5, - 0xa5, 0xd5, 0x12, 0x3c, 0x16, 0xb5, 0x71, 0xc9, 0x40, 0x8e, 0xa0, 0xcc, 0x54, 0x08, 0xae, 0x34, - 0x77, 0x69, 0x45, 0xa4, 0xae, 0xf8, 0x23, 0x36, 0x72, 0x13, 0x2a, 0xd1, 0xf9, 0xac, 0x48, 0xef, - 0xa2, 0xe3, 0x55, 0x42, 0x62, 0x46, 0xfd, 0x18, 0xaa, 0x89, 0xe5, 0x91, 0xff, 0x83, 0xca, 0x88, - 0x4e, 0x54, 0x4e, 0x26, 0xa3, 0xec, 0xf2, 0x88, 0x4e, 0x30, 0x1d, 0x23, 0x6f, 0x40, 0x49, 0x0c, - 0xf6, 0xa9, 0x3c, 0xed, 0xbc, 0x51, 0x1c, 0xd1, 0xc9, 0x77, 0x69, 0xa0, 0xff, 0x5c, 0x83, 0xad, - 0xf4, 0x3a, 0xc9, 0x3b, 0x40, 0x04, 0x2d, 0xed, 0x33, 0xd3, 0x19, 0x8f, 0x24, 0x90, 0x86, 0x12, - 0x6b, 0x23, 0x3a, 0x39, 0xea, 0xb3, 0xbb, 0xe3, 0x11, 0x4e, 0x1d, 0x90, 0x3b, 0x50, 0x0f, 0x89, - 0xc3, 0x8a, 0x98, 0xd2, 0xca, 0xf9, 0x33, 0x19, 0xf1, 0x4d, 0x45, 0x20, 0x13, 0xe2, 0x5f, 0x89, - 0x84, 0x78, 0x4b, 0xca, 0x0b, 0x47, 0xf4, 0xf7, 0xa1, 0x36, 0xb3, 0x63, 0xa2, 0xc3, 0xa6, 0x37, - 0xee, 0x9a, 0x4f, 0xd9, 0xd4, 0x44, 0x95, 0xa0, 0x3d, 0x54, 0x8c, 0xaa, 0x37, 0xee, 0x7e, 0xcc, - 0xa6, 0x22, 0x35, 0x09, 0xf4, 0x1e, 0x6c, 0xa5, 0x33, 0x2e, 0x81, 0x2e, 0xbe, 0x3b, 0x76, 0x2c, - 0x5c, 0xf7, 0xba, 0x21, 0x1b, 0xe4, 0x3a, 0xac, 0x9f, 0xb8, 0xf2, 0xca, 0x2f, 0x4b, 0xb1, 0x1e, - 0xb9, 0x9c, 0x25, 0xf2, 0x36, 0xc9, 0xa3, 0xdf, 0x15, 0x9a, 0x62, 0x0e, 0x3f, 0xe2, 0xdc, 0xb7, - 0xbb, 0x63, 0xce, 0x92, 0xf5, 0x83, 0x8d, 0x39, 0xf5, 0x83, 0x08, 0xd4, 0x22, 0x48, 0xcc, 0xcb, - 0xe4, 0x15, 0x1b, 0xfa, 0x4f, 0x34, 0x58, 0x47, 0x81, 0xe2, 0x66, 0x63, 0x32, 0xa6, 0xc2, 0x25, - 0xf1, 0x4d, 0x7a, 0x00, 0x34, 0x9c, 0x28, 0x5c, 0xef, 0xa5, 0x65, 0x36, 0x15, 0x2d, 0xab, 0x75, - 0x41, 0x19, 0xd7, 0xb9, 0x58, 0x40, 0xc2, 0xc0, 0x12, 0x62, 0xf5, 0xaf, 0x0a, 0x50, 0x94, 0x19, - 0x2f, 0xf9, 0x30, 0x5d, 0x7f, 0xa9, 0x1e, 0x6e, 0x2f, 0x52, 0x8e, 0xa4, 0x52, 0xba, 0x89, 0x82, - 0xb8, 0xcb, 0xb3, 0x45, 0x8d, 0x56, 0xf5, 0xf4, 0xf9, 0x4e, 0x09, 0x03, 0xa0, 0xce, 0xcd, 0xb8, - 0xc2, 0xb1, 0x28, 0xc1, 0x0f, 0xcb, 0x29, 0x85, 0x17, 0x2e, 0xa7, 0xb4, 0x61, 0x33, 0x11, 0xf1, - 0xd9, 0x96, 0x4a, 0x95, 0xb6, 0x97, 0x99, 0x74, 0xe7, 0xa6, 0x5a, 0x7f, 0x35, 0x8a, 0x08, 0x3b, - 0x16, 0xd9, 0x4b, 0xe7, 0xf9, 0x18, 0x38, 0xca, 0x88, 0x25, 0x91, 0xba, 0x8b, 0xb0, 0x51, 0x18, - 0x9b, 0xf0, 0x3f, 0x92, 0x44, 0x06, 0x30, 0x65, 0xd1, 0x81, 0x83, 0x57, 0xa0, 0x16, 0xc7, 0x56, - 0x92, 0xa4, 0x2c, 0xa5, 0xc4, 0xdd, 0x48, 0xf8, 0x1e, 0x9c, 0x73, 0xd8, 0x84, 0x9b, 0xb3, 0xd4, - 0x15, 0xa4, 0x26, 0x62, 0xec, 0x51, 0x9a, 0xe3, 0x12, 0x6c, 0xc5, 0x5e, 0x1c, 0x69, 0x41, 0x56, - 0x5f, 0xa2, 0x5e, 0x24, 0x3b, 0x0f, 0xe5, 0x28, 0xf2, 0xad, 0x22, 0x41, 0x89, 0xca, 0x80, 0x37, - 0x8a, 0xa5, 0x7d, 0x16, 0x8c, 0x87, 0x5c, 0x09, 0xd9, 0x40, 0x1a, 0x8c, 0xa5, 0x0d, 0xd9, 0x8f, - 0xb4, 0xbb, 0xb0, 0x19, 0xfa, 0x2c, 0x49, 0xb7, 0x89, 0x74, 0x1b, 0x61, 0x27, 0x12, 0x5d, 0x85, - 0xba, 0xe7, 0xbb, 0x9e, 0x1b, 0x30, 0xdf, 0xa4, 0x96, 0xe5, 0xb3, 0x20, 0x68, 0x6c, 0x49, 0x79, - 0x61, 0xff, 0x91, 0xec, 0xd6, 0xbf, 0x01, 0xa5, 0x30, 0xa4, 0x3f, 0x07, 0xeb, 0xad, 0xc8, 0xff, - 0x16, 
0x0c, 0xd9, 0x10, 0x06, 0x75, 0xe4, 0x79, 0xaa, 0xc0, 0x27, 0x3e, 0xf5, 0x21, 0x94, 0xd4, - 0x81, 0xcd, 0x2d, 0xeb, 0xdc, 0x81, 0x0d, 0x8f, 0xfa, 0x62, 0x1b, 0xc9, 0xe2, 0xce, 0xa2, 0xa4, - 0xf4, 0x1e, 0xf5, 0xf9, 0x7d, 0xc6, 0x53, 0x35, 0x9e, 0x2a, 0xf2, 0xcb, 0x2e, 0xfd, 0x1a, 0x6c, - 0xa6, 0x68, 0xc4, 0x32, 0xb9, 0xcb, 0xe9, 0x30, 0x74, 0x23, 0xd8, 0x88, 0x56, 0x92, 0x8b, 0x57, - 0xa2, 0x5f, 0x87, 0x4a, 0x74, 0x56, 0x22, 0xd7, 0x09, 0x55, 0xa1, 0x29, 0xf5, 0xcb, 0x26, 0xd6, - 0xb1, 0xdc, 0x67, 0xcc, 0x57, 0xb7, 0x5f, 0x36, 0x74, 0x96, 0x70, 0x7b, 0x12, 0x50, 0xc9, 0x0d, - 0x28, 0x29, 0xb7, 0xa7, 0xec, 0x71, 0x51, 0xc5, 0xea, 0x1e, 0xfa, 0xc1, 0xb0, 0x62, 0x25, 0xbd, - 0x62, 0x3c, 0x4d, 0x2e, 0x39, 0xcd, 0x8f, 0xa1, 0x1c, 0xba, 0xb6, 0x34, 0x06, 0xc9, 0x19, 0x2e, - 0xae, 0xc2, 0x20, 0x35, 0x49, 0xcc, 0x28, 0x6e, 0x53, 0x60, 0xf7, 0x1d, 0x66, 0x99, 0xb1, 0x09, - 0xe2, 0x9c, 0x65, 0xa3, 0x26, 0x07, 0x6e, 0x87, 0xf6, 0xa5, 0xbf, 0x07, 0x45, 0xb9, 0xd6, 0xb9, - 0xfe, 0x6e, 0x0e, 0xba, 0xeb, 0x7f, 0xd7, 0xa0, 0x1c, 0x82, 0xd3, 0x5c, 0xa6, 0xd4, 0x26, 0x72, - 0x5f, 0x77, 0x13, 0x2f, 0xdf, 0x25, 0xbd, 0x0b, 0x04, 0x6f, 0x8a, 0x79, 0xe2, 0x72, 0xdb, 0xe9, - 0x9b, 0xf2, 0x2c, 0x64, 0x30, 0x5a, 0xc7, 0x91, 0x47, 0x38, 0x70, 0x4f, 0xf4, 0xbf, 0xbd, 0x0b, - 0xd5, 0x44, 0xa1, 0x8d, 0x94, 0x20, 0x7f, 0x97, 0x3d, 0xab, 0xaf, 0x91, 0x2a, 0x94, 0x0c, 0x86, - 0x65, 0x8a, 0xba, 0x76, 0xf8, 0x55, 0x09, 0x6a, 0x47, 0xad, 0xe3, 0xce, 0x91, 0xe7, 0x0d, 0xed, - 0x1e, 0xa2, 0x25, 0xf9, 0x04, 0x0a, 0x98, 0xaa, 0x67, 0x78, 0x62, 0x6a, 0x66, 0xa9, 0x79, 0x11, - 0x03, 0xd6, 0x31, 0xa3, 0x27, 0x59, 0x5e, 0x9e, 0x9a, 0x99, 0x4a, 0x61, 0x62, 0x91, 0x78, 0xe1, - 0x32, 0x3c, 0x48, 0x35, 0xb3, 0xd4, 0xc7, 0xc8, 0x67, 0x50, 0x89, 0x53, 0xf5, 0xac, 0xcf, 0x54, - 0xcd, 0xcc, 0x95, 0x33, 0x21, 0x3f, 0x4e, 0x4e, 0xb2, 0x3e, 0xd2, 0x34, 0x33, 0x97, 0x8c, 0xc8, - 0x63, 0x28, 0x85, 0x69, 0x60, 0xb6, 0x87, 0xa4, 0x66, 0xc6, 0xaa, 0x96, 0x38, 0x3e, 0x99, 0xbd, - 0x67, 0x79, 0x2d, 0x6b, 0x66, 0x2a, 0xdd, 0x91, 0x87, 0x50, 0x54, 0xf1, 0x77, 0xa6, 0x27, 0xa2, - 0x66, 0xb6, 0x5a, 0x95, 0x50, 0x72, 0x5c, 0x1f, 0xc9, 0xfa, 0x42, 0xd8, 0xcc, 0x5c, 0xb3, 0x24, - 0x14, 0x20, 0x91, 0xd2, 0x67, 0x7e, 0xfa, 0x6b, 0x66, 0xaf, 0x45, 0x92, 0x1f, 0x42, 0x39, 0x4a, - 0xdc, 0x32, 0x3e, 0xc1, 0x35, 0xb3, 0x96, 0x03, 0x5b, 0x9d, 0x7f, 0xff, 0x75, 0x5b, 0xfb, 0xcd, - 0xe9, 0xb6, 0xf6, 0xc5, 0xe9, 0xb6, 0xf6, 0xe5, 0xe9, 0xb6, 0xf6, 0xc7, 0xd3, 0x6d, 0xed, 0x2f, - 0xa7, 0xdb, 0xda, 0xef, 0xff, 0xb6, 0xad, 0xfd, 0xe0, 0x9d, 0xbe, 0xcd, 0x07, 0xe3, 0xee, 0x7e, - 0xcf, 0x1d, 0x1d, 0xc4, 0x02, 0x93, 0x9f, 0xf1, 0x13, 0x7a, 0xb7, 0x88, 0x0e, 0xeb, 0x9b, 0xff, - 0x09, 0x00, 0x00, 0xff, 0xff, 0x61, 0xd2, 0x8e, 0xea, 0x57, 0x1f, 0x00, 0x00, + // 2928 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x5a, 0x3d, 0x90, 0x23, 0x47, + 0xf5, 0xdf, 0x91, 0xb4, 0x92, 0xe6, 0x69, 0xf5, 0x71, 0x7d, 0xeb, 0xb3, 0xac, 0xbf, 0xbd, 0x7b, + 0x35, 0xe7, 0xfb, 0xb2, 0xfd, 0xdf, 0x3d, 0xaf, 0xcb, 0x94, 0xcd, 0x19, 0x53, 0xab, 0xbd, 0x33, + 0xda, 0xf2, 0xf9, 0x7c, 0x9e, 0xfb, 0xc0, 0x40, 0x95, 0x87, 0xd6, 0x4c, 0xaf, 0x34, 0x3e, 0x69, + 0x66, 0x3c, 0xd3, 0x5a, 0xaf, 0x28, 0x02, 0x8a, 0x84, 0xa2, 0x8a, 0x00, 0x02, 0xaa, 0x48, 0xc8, + 0x09, 0x09, 0xa8, 0xb2, 0x43, 0x12, 0xaa, 0x1c, 0x12, 0x10, 0x9f, 0x61, 0x21, 0x02, 0x42, 0x02, + 0x42, 0xaa, 0x3f, 0xe6, 0x4b, 0x2b, 0x69, 0x46, 0xe6, 0x32, 0x92, 0xdd, 0xe9, 0x9e, 0xf7, 0x5e, + 0x77, 0xbf, 0x9e, 0xfe, 0xbd, 0xdf, 0x7b, 0x6a, 0xb8, 0x80, 0xfb, 0xa6, 0xbd, 0x4b, 0xa7, 0x1e, + 
0x09, 0xc4, 0xdf, 0x1d, 0xcf, 0x77, 0xa9, 0x8b, 0x9e, 0xa1, 0xc4, 0xb1, 0x88, 0x3f, 0xb6, 0x1d, + 0xba, 0xc3, 0x44, 0x76, 0xf8, 0xcb, 0xce, 0x15, 0x3a, 0xb4, 0x7d, 0xcb, 0xf0, 0xb0, 0x4f, 0xa7, + 0xbb, 0x5c, 0x72, 0x77, 0xe0, 0x0e, 0xdc, 0xf8, 0x49, 0xa8, 0x77, 0x3a, 0xa6, 0x3f, 0xf5, 0xa8, + 0xbb, 0x3b, 0x26, 0xfe, 0xe3, 0x11, 0x91, 0xff, 0xe4, 0xbb, 0xed, 0x81, 0xeb, 0x0e, 0x46, 0x44, + 0xa8, 0xf7, 0x27, 0x47, 0xbb, 0xd4, 0x1e, 0x93, 0x80, 0xe2, 0xb1, 0x27, 0x05, 0xb6, 0x66, 0x05, + 0xac, 0x89, 0x8f, 0xa9, 0xed, 0x3a, 0xe2, 0xbd, 0xf6, 0x8f, 0x2a, 0x54, 0x74, 0xf2, 0xc9, 0x84, + 0x04, 0x14, 0xbd, 0x01, 0x25, 0x62, 0x0e, 0xdd, 0x76, 0xe1, 0xa2, 0x72, 0xad, 0xb6, 0xa7, 0xed, + 0xcc, 0x9d, 0xf6, 0x8e, 0x94, 0xbe, 0x6d, 0x0e, 0xdd, 0xde, 0x9a, 0xce, 0x35, 0xd0, 0x4d, 0x58, + 0x3f, 0x1a, 0x4d, 0x82, 0x61, 0xbb, 0xc8, 0x55, 0x2f, 0x2d, 0x57, 0x7d, 0x87, 0x89, 0xf6, 0xd6, + 0x74, 0xa1, 0xc3, 0x86, 0xb5, 0x9d, 0x23, 0xb7, 0x5d, 0xca, 0x33, 0xec, 0xa1, 0x73, 0xc4, 0x87, + 0x65, 0x1a, 0xa8, 0x07, 0x10, 0x10, 0x6a, 0xb8, 0x1e, 0x5b, 0x50, 0x7b, 0x9d, 0xeb, 0x5f, 0x5d, + 0xae, 0x7f, 0x9f, 0xd0, 0xf7, 0xb9, 0x78, 0x6f, 0x4d, 0x57, 0x83, 0xb0, 0xc1, 0x2c, 0xd9, 0x8e, + 0x4d, 0x0d, 0x73, 0x88, 0x6d, 0xa7, 0x5d, 0xce, 0x63, 0xe9, 0xd0, 0xb1, 0xe9, 0x01, 0x13, 0x67, + 0x96, 0xec, 0xb0, 0xc1, 0x5c, 0xf1, 0xc9, 0x84, 0xf8, 0xd3, 0x76, 0x25, 0x8f, 0x2b, 0x3e, 0x60, + 0xa2, 0xcc, 0x15, 0x5c, 0x07, 0xbd, 0x0b, 0xb5, 0x3e, 0x19, 0xd8, 0x8e, 0xd1, 0x1f, 0xb9, 0xe6, + 0xe3, 0x76, 0x95, 0x9b, 0xb8, 0xb6, 0xdc, 0x44, 0x97, 0x29, 0x74, 0x99, 0x7c, 0x6f, 0x4d, 0x87, + 0x7e, 0xd4, 0x42, 0x5d, 0xa8, 0x9a, 0x43, 0x62, 0x3e, 0x36, 0xe8, 0x49, 0x5b, 0xe5, 0x96, 0x2e, + 0x2f, 0xb7, 0x74, 0xc0, 0xa4, 0x1f, 0x9c, 0xf4, 0xd6, 0xf4, 0x8a, 0x29, 0x1e, 0x99, 0x5f, 0x2c, + 0x32, 0xb2, 0x8f, 0x89, 0xcf, 0xac, 0x9c, 0xcf, 0xe3, 0x97, 0x5b, 0x42, 0x9e, 0xdb, 0x51, 0xad, + 0xb0, 0x81, 0x6e, 0x83, 0x4a, 0x1c, 0x4b, 0x2e, 0xac, 0xc6, 0x0d, 0x5d, 0xc9, 0xf8, 0xc2, 0x1c, + 0x2b, 0x5c, 0x56, 0x95, 0xc8, 0x67, 0xf4, 0x36, 0x94, 0x4d, 0x77, 0x3c, 0xb6, 0x69, 0x7b, 0x83, + 0xdb, 0x78, 0x31, 0x63, 0x49, 0x5c, 0xb6, 0xb7, 0xa6, 0x4b, 0x2d, 0xf4, 0x00, 0x1a, 0x23, 0x3b, + 0xa0, 0x46, 0xe0, 0x60, 0x2f, 0x18, 0xba, 0x34, 0x68, 0xd7, 0xb9, 0x9d, 0x97, 0x97, 0xdb, 0xb9, + 0x63, 0x07, 0xf4, 0x7e, 0xa8, 0xd2, 0x5b, 0xd3, 0xeb, 0xa3, 0x64, 0x07, 0xb3, 0xea, 0x1e, 0x1d, + 0x11, 0x3f, 0x32, 0xdb, 0x6e, 0xe4, 0xb1, 0xfa, 0x3e, 0xd3, 0x09, 0xad, 0x30, 0xab, 0x6e, 0xb2, + 0x03, 0x61, 0x38, 0x3f, 0x72, 0xb1, 0x15, 0x19, 0x35, 0xcc, 0xe1, 0xc4, 0x79, 0xdc, 0x6e, 0x72, + 0xd3, 0xbb, 0x19, 0x13, 0x76, 0xb1, 0x15, 0x1a, 0x3a, 0x60, 0x6a, 0xbd, 0x35, 0xfd, 0xdc, 0x68, + 0xb6, 0x13, 0x59, 0xb0, 0x89, 0x3d, 0x6f, 0x34, 0x9d, 0x1d, 0xa3, 0xc5, 0xc7, 0xb8, 0xb1, 0x7c, + 0x8c, 0x7d, 0xa6, 0x39, 0x3b, 0x08, 0xc2, 0x67, 0x7a, 0xbb, 0x15, 0x58, 0x3f, 0xc6, 0xa3, 0x09, + 0xd1, 0xae, 0x42, 0x2d, 0x01, 0x1f, 0xa8, 0x0d, 0x95, 0x31, 0x09, 0x02, 0x3c, 0x20, 0x6d, 0xe5, + 0xa2, 0x72, 0x4d, 0xd5, 0xc3, 0xa6, 0xd6, 0x80, 0x8d, 0x24, 0x58, 0x68, 0xe3, 0x48, 0x91, 0x01, + 0x00, 0x53, 0x3c, 0x26, 0x7e, 0xc0, 0x4e, 0xbd, 0x54, 0x94, 0x4d, 0x74, 0x09, 0xea, 0xfc, 0x13, + 0x33, 0xc2, 0xf7, 0x0c, 0xcc, 0x4a, 0xfa, 0x06, 0xef, 0x7c, 0x24, 0x85, 0xb6, 0xa1, 0xe6, 0xed, + 0x79, 0x91, 0x48, 0x91, 0x8b, 0x80, 0xb7, 0xe7, 0x49, 0x01, 0xed, 0xeb, 0xd0, 0x9a, 0xc5, 0x0b, + 0xd4, 0x82, 0xe2, 0x63, 0x32, 0x95, 0xe3, 0xb1, 0x47, 0xb4, 0x29, 0x97, 0xc5, 0xc7, 0x50, 0x75, + 0xb9, 0xc6, 0xdf, 0x16, 0x22, 0xe5, 0x08, 0x22, 0x18, 0xc6, 0x31, 0x64, 0xe6, 0xda, 0xb5, 0xbd, + 0xce, 0x8e, 0x40, 0xe5, 
0x9d, 0x10, 0x95, 0x77, 0x1e, 0x84, 0xb0, 0xdd, 0xad, 0x7e, 0xf1, 0x64, + 0x7b, 0xed, 0xe7, 0x5f, 0x6e, 0x2b, 0x3a, 0xd7, 0x40, 0xcf, 0xb1, 0x53, 0x8c, 0x6d, 0xc7, 0xb0, + 0x2d, 0x39, 0x4e, 0x85, 0xb7, 0x0f, 0x2d, 0xf4, 0x01, 0xb4, 0x4c, 0xd7, 0x09, 0x88, 0x13, 0x4c, + 0x02, 0x16, 0x46, 0xf0, 0x38, 0x90, 0x00, 0xbc, 0xe8, 0x64, 0x1d, 0x84, 0xe2, 0xf7, 0xb8, 0xb4, + 0xde, 0x34, 0xd3, 0x1d, 0xe8, 0x0e, 0xc0, 0x31, 0x1e, 0xd9, 0x16, 0xa6, 0xae, 0x1f, 0xb4, 0x4b, + 0x17, 0x8b, 0x4b, 0x8c, 0x3d, 0x0a, 0x05, 0x1f, 0x7a, 0x16, 0xa6, 0xa4, 0x5b, 0x62, 0x33, 0xd7, + 0x13, 0xfa, 0xe8, 0x0a, 0x34, 0xb1, 0xe7, 0x19, 0x01, 0xc5, 0x94, 0x18, 0xfd, 0x29, 0x25, 0x01, + 0x07, 0xe9, 0x0d, 0xbd, 0x8e, 0x3d, 0xef, 0x3e, 0xeb, 0xed, 0xb2, 0x4e, 0xcd, 0x8a, 0x76, 0x9b, + 0xe3, 0x21, 0x42, 0x50, 0xb2, 0x30, 0xc5, 0xdc, 0x5b, 0x1b, 0x3a, 0x7f, 0x66, 0x7d, 0x1e, 0xa6, + 0x43, 0xe9, 0x03, 0xfe, 0x8c, 0x2e, 0x40, 0x79, 0x48, 0xec, 0xc1, 0x90, 0xf2, 0x65, 0x17, 0x75, + 0xd9, 0x62, 0x1b, 0xe3, 0xf9, 0xee, 0x31, 0xe1, 0x21, 0xa5, 0xaa, 0x8b, 0x86, 0xf6, 0xcb, 0x02, + 0x9c, 0x3b, 0x83, 0x99, 0xcc, 0xee, 0x10, 0x07, 0xc3, 0x70, 0x2c, 0xf6, 0x8c, 0x6e, 0x32, 0xbb, + 0xd8, 0x22, 0xbe, 0x0c, 0x85, 0x2f, 0x2c, 0xf0, 0x40, 0x8f, 0x0b, 0xc9, 0x85, 0x4b, 0x15, 0xf4, + 0x10, 0x5a, 0x23, 0x1c, 0x50, 0x43, 0x00, 0x8e, 0xc1, 0x43, 0x5b, 0x71, 0x29, 0xfc, 0xde, 0xc1, + 0x21, 0x50, 0xb1, 0x8f, 0x5b, 0x9a, 0x6b, 0x8c, 0x52, 0xbd, 0xe8, 0x43, 0xd8, 0xec, 0x4f, 0x7f, + 0x80, 0x1d, 0x6a, 0x3b, 0xc4, 0x38, 0xb3, 0x47, 0xdb, 0x0b, 0x4c, 0xdf, 0x3e, 0xb6, 0x2d, 0xe2, + 0x98, 0xe1, 0xe6, 0x9c, 0x8f, 0x4c, 0x44, 0x9b, 0x17, 0x68, 0x1f, 0x42, 0x23, 0x1d, 0x00, 0x50, + 0x03, 0x0a, 0xf4, 0x44, 0x7a, 0xa4, 0x40, 0x4f, 0xd0, 0xd7, 0xa0, 0xc4, 0xcc, 0x71, 0x6f, 0x34, + 0x16, 0x46, 0x68, 0xa9, 0xfd, 0x60, 0xea, 0x11, 0x9d, 0xcb, 0x6b, 0x5a, 0x74, 0x12, 0xa2, 0xa0, + 0x30, 0x6b, 0x5b, 0xbb, 0x0e, 0xcd, 0x19, 0xbc, 0x4f, 0x6c, 0xab, 0x92, 0xdc, 0x56, 0xad, 0x09, + 0xf5, 0x14, 0xac, 0x6b, 0x17, 0x60, 0x73, 0x1e, 0x3e, 0x6b, 0x4e, 0xd4, 0x9f, 0x42, 0x58, 0x74, + 0x13, 0xaa, 0x11, 0x40, 0x8b, 0x93, 0xb8, 0xc8, 0x6f, 0xa1, 0x8a, 0x1e, 0x29, 0xb0, 0x83, 0xc8, + 0x3e, 0x66, 0xfe, 0xb1, 0x14, 0xf8, 0xf4, 0x2b, 0xd8, 0xf3, 0x7a, 0x38, 0x18, 0x6a, 0xdf, 0x87, + 0xf6, 0x22, 0xd8, 0x9d, 0x59, 0x4c, 0x29, 0xfa, 0x46, 0x2f, 0x40, 0xf9, 0xc8, 0xf5, 0xc7, 0x98, + 0x72, 0x63, 0x75, 0x5d, 0xb6, 0xd8, 0xb7, 0x2b, 0x20, 0xb8, 0xc8, 0xbb, 0x45, 0x43, 0x33, 0xe0, + 0xb9, 0x85, 0xa0, 0xcb, 0x54, 0x6c, 0xc7, 0x22, 0xc2, 0xab, 0x75, 0x5d, 0x34, 0x62, 0x43, 0x62, + 0xb2, 0xa2, 0xc1, 0x86, 0x0d, 0xf8, 0x8a, 0xb9, 0x7d, 0x55, 0x97, 0x2d, 0xed, 0x0f, 0x2a, 0x54, + 0x75, 0x12, 0x78, 0x0c, 0x0f, 0x50, 0x0f, 0x54, 0x72, 0x62, 0x12, 0x41, 0xab, 0x94, 0x0c, 0x12, + 0x22, 0x74, 0x6e, 0x87, 0xf2, 0x2c, 0xea, 0x47, 0xca, 0xe8, 0xcd, 0x14, 0xa5, 0xbc, 0x94, 0x65, + 0x24, 0xc9, 0x29, 0xdf, 0x4a, 0x73, 0xca, 0x17, 0x33, 0x74, 0x67, 0x48, 0xe5, 0x9b, 0x29, 0x52, + 0x99, 0x35, 0x70, 0x8a, 0x55, 0x1e, 0xce, 0x61, 0x95, 0x59, 0xcb, 0x5f, 0x40, 0x2b, 0x0f, 0xe7, + 0xd0, 0xca, 0x6b, 0x99, 0x73, 0x99, 0xcb, 0x2b, 0xdf, 0x4a, 0xf3, 0xca, 0x2c, 0x77, 0xcc, 0x10, + 0xcb, 0x3b, 0xf3, 0x88, 0xe5, 0xf5, 0x0c, 0x1b, 0x0b, 0x99, 0xe5, 0xc1, 0x19, 0x66, 0x79, 0x25, + 0xc3, 0xd4, 0x1c, 0x6a, 0x79, 0x98, 0xa2, 0x96, 0x90, 0xcb, 0x37, 0x0b, 0xb8, 0xe5, 0x3b, 0x67, + 0xb9, 0xe5, 0xd5, 0xac, 0x4f, 0x6d, 0x1e, 0xb9, 0xfc, 0xe6, 0x0c, 0xb9, 0xbc, 0x9c, 0xb5, 0xaa, + 0x59, 0x76, 0xf9, 0x70, 0x01, 0xbb, 0x7c, 0x25, 0xc3, 0x50, 0x06, 0xbd, 0x7c, 0xb8, 0x80, 0x5e, + 0x66, 0x99, 0xcd, 0xe0, 0x97, 0xfd, 0x65, 0xfc, 
0xf2, 0x46, 0xd6, 0x94, 0xf3, 0x11, 0x4c, 0xb2, + 0x94, 0x60, 0xbe, 0x9a, 0x31, 0xc8, 0xea, 0x0c, 0xf3, 0x3a, 0x8b, 0xf1, 0x33, 0x90, 0xc4, 0xa0, + 0x90, 0xf8, 0xbe, 0xeb, 0x4b, 0xf2, 0x26, 0x1a, 0xda, 0x35, 0xc6, 0x3a, 0x62, 0xe0, 0x59, 0xc2, + 0x46, 0x79, 0xe0, 0x49, 0xc0, 0x8c, 0xf6, 0xb9, 0x12, 0xeb, 0xf2, 0xe8, 0x9c, 0x64, 0x2c, 0xaa, + 0x64, 0x2c, 0x09, 0x92, 0x5a, 0x48, 0x93, 0xd4, 0x6d, 0xa8, 0xb1, 0x50, 0x32, 0xc3, 0x3f, 0xb1, + 0x17, 0xf2, 0x4f, 0xf4, 0x12, 0x9c, 0xe3, 0x1c, 0x42, 0x50, 0x59, 0x19, 0x3f, 0x4a, 0x3c, 0x18, + 0x36, 0xd9, 0x0b, 0xf1, 0xe9, 0x8a, 0x40, 0xf2, 0xff, 0x70, 0x3e, 0x21, 0x1b, 0x85, 0x28, 0x41, + 0xb4, 0x5a, 0x91, 0xf4, 0xbe, 0x8c, 0x55, 0xef, 0xc5, 0x0e, 0x8a, 0xb9, 0x2d, 0x82, 0x92, 0xe9, + 0x5a, 0x44, 0x06, 0x10, 0xfe, 0xcc, 0xf8, 0xee, 0xc8, 0x1d, 0xc8, 0x30, 0xc1, 0x1e, 0x99, 0x54, + 0x84, 0xa9, 0xaa, 0x00, 0x4b, 0xed, 0x77, 0x4a, 0x6c, 0x2f, 0xa6, 0xbb, 0xf3, 0x98, 0xa9, 0xf2, + 0x34, 0x99, 0x69, 0xe1, 0xbf, 0x63, 0xa6, 0xda, 0xbf, 0x94, 0x78, 0x4b, 0x23, 0xce, 0xf9, 0xd5, + 0x5c, 0x10, 0x87, 0xdf, 0x75, 0xbe, 0x41, 0x32, 0xfc, 0xca, 0x74, 0xa1, 0xcc, 0xb7, 0x21, 0x9d, + 0x2e, 0x54, 0x44, 0x40, 0xe6, 0x0d, 0xf4, 0x3a, 0xe7, 0xaa, 0xee, 0x91, 0xc4, 0xe4, 0x14, 0x21, + 0x11, 0x85, 0x9f, 0x1d, 0x59, 0xf1, 0xb9, 0xc7, 0xc4, 0x74, 0x21, 0x9d, 0xa0, 0x15, 0x6a, 0x8a, + 0xfa, 0x3e, 0x0f, 0x2a, 0x9b, 0x7a, 0xe0, 0x61, 0x93, 0x70, 0x50, 0x55, 0xf5, 0xb8, 0x43, 0xb3, + 0x00, 0x9d, 0x05, 0x77, 0x74, 0x17, 0xca, 0xe4, 0x98, 0x38, 0x94, 0xed, 0x11, 0x73, 0xeb, 0xf3, + 0x0b, 0xc9, 0x24, 0x71, 0x68, 0xb7, 0xcd, 0x9c, 0xf9, 0xf7, 0x27, 0xdb, 0x2d, 0xa1, 0xf3, 0x8a, + 0x3b, 0xb6, 0x29, 0x19, 0x7b, 0x74, 0xaa, 0x4b, 0x2b, 0xda, 0x4f, 0x0a, 0x8c, 0xd3, 0xa5, 0x80, + 0x7f, 0xae, 0x7b, 0xc3, 0x43, 0x53, 0x48, 0xd0, 0xfc, 0x7c, 0x2e, 0x7f, 0x01, 0x60, 0x80, 0x03, + 0xe3, 0x53, 0xec, 0x50, 0x62, 0x49, 0xbf, 0xab, 0x03, 0x1c, 0x7c, 0x9b, 0x77, 0x30, 0xaa, 0xc6, + 0x5e, 0x4f, 0x02, 0x62, 0xf1, 0x0d, 0x28, 0xea, 0x95, 0x01, 0x0e, 0x1e, 0x06, 0xc4, 0x4a, 0xac, + 0xb5, 0xf2, 0x34, 0xd6, 0x9a, 0xf6, 0x77, 0x75, 0xd6, 0xdf, 0x3f, 0x2d, 0xc4, 0xa7, 0x23, 0xa6, + 0xc0, 0xff, 0x9b, 0xbe, 0xf8, 0x35, 0xcf, 0x8b, 0xd3, 0xd1, 0x17, 0x7d, 0x07, 0xce, 0x45, 0xa7, + 0xd2, 0x98, 0xf0, 0xd3, 0x1a, 0x7e, 0x85, 0xab, 0x1d, 0xee, 0xd6, 0x71, 0xba, 0x3b, 0x40, 0x1f, + 0xc1, 0xb3, 0x33, 0x18, 0x14, 0x0d, 0x50, 0x58, 0x09, 0x8a, 0x9e, 0x49, 0x43, 0x51, 0x68, 0x3f, + 0xf6, 0x5e, 0xf1, 0xa9, 0x9c, 0x9a, 0x43, 0x96, 0x86, 0x25, 0x79, 0xc5, 0xdc, 0x6f, 0xe2, 0x12, + 0xd4, 0x7d, 0x42, 0xb1, 0xed, 0x18, 0xa9, 0xcc, 0x77, 0x43, 0x74, 0x8a, 0x90, 0xa0, 0x3d, 0x82, + 0x67, 0xe6, 0x32, 0x0b, 0xf4, 0x0d, 0x50, 0x63, 0x6a, 0xa2, 0x2c, 0xcd, 0x1c, 0xa3, 0x0c, 0x28, + 0xd6, 0xd0, 0x3e, 0x53, 0x62, 0xc3, 0xe9, 0xcc, 0xea, 0x5d, 0x28, 0xfb, 0x24, 0x98, 0x8c, 0x44, + 0x96, 0xd3, 0xd8, 0x7b, 0x6d, 0x15, 0x66, 0xc2, 0x7a, 0x27, 0x23, 0xaa, 0x4b, 0x13, 0xda, 0x07, + 0x50, 0x16, 0x3d, 0x08, 0xa0, 0x8c, 0x4d, 0x16, 0xc5, 0x5b, 0x6b, 0x48, 0x85, 0x75, 0xdc, 0x77, + 0x7d, 0xda, 0x52, 0x58, 0xb7, 0x4f, 0x3e, 0x26, 0x26, 0x6d, 0x15, 0xd0, 0x39, 0xe6, 0x10, 0xf6, + 0x6c, 0x88, 0x04, 0xaa, 0x55, 0x4c, 0x74, 0x89, 0xe4, 0xa6, 0x55, 0xd2, 0x5e, 0x65, 0xf9, 0xd3, + 0x02, 0xe2, 0x12, 0x67, 0x4a, 0x4a, 0x22, 0x53, 0xd2, 0x7e, 0x51, 0x80, 0xce, 0x62, 0x1e, 0x82, + 0xee, 0xcd, 0xac, 0xf8, 0x8d, 0x95, 0xa9, 0xcc, 0xcc, 0xb2, 0xd1, 0x65, 0x68, 0xf8, 0xe4, 0x88, + 0x50, 0x73, 0x28, 0x38, 0x92, 0x88, 0x72, 0x75, 0xbd, 0x2e, 0x7b, 0xb9, 0x52, 0x20, 0xc4, 0x12, + 0xab, 0x13, 0xdf, 0x9f, 0xaa, 0xcb, 0x35, 0xdf, 0x17, 0x9d, 0xda, 0xfd, 
0x2c, 0x27, 0xaa, 0xb0, + 0xee, 0x13, 0xea, 0x4f, 0x5b, 0x05, 0x84, 0x98, 0x49, 0xea, 0xc7, 0x24, 0xad, 0x55, 0x44, 0xe7, + 0xa1, 0x19, 0x0e, 0x13, 0x76, 0x96, 0xb4, 0x3f, 0x29, 0xd0, 0x9c, 0x39, 0x1e, 0xe8, 0x0d, 0x58, + 0x17, 0xc4, 0x5b, 0x59, 0x5a, 0xbf, 0xe7, 0xe7, 0x5d, 0x9e, 0x28, 0xa1, 0x80, 0xf6, 0xa1, 0x4a, + 0x64, 0x7d, 0x42, 0x1e, 0xc9, 0xcb, 0x19, 0x65, 0x0c, 0xa9, 0x1f, 0xa9, 0xa1, 0x5b, 0xa0, 0x46, + 0x07, 0x3f, 0xa3, 0xf6, 0x15, 0xe1, 0x86, 0x34, 0x12, 0x2b, 0x6a, 0x07, 0x50, 0x4b, 0x4c, 0x0f, + 0xfd, 0x1f, 0xa8, 0x63, 0x7c, 0x22, 0x0b, 0x56, 0xa2, 0x04, 0x51, 0x1d, 0xe3, 0x13, 0x5e, 0xab, + 0x42, 0xcf, 0x42, 0x85, 0xbd, 0x1c, 0x60, 0x01, 0x23, 0x45, 0xbd, 0x3c, 0xc6, 0x27, 0xdf, 0xc2, + 0x81, 0xf6, 0x33, 0x05, 0x1a, 0xe9, 0x79, 0xa2, 0x97, 0x01, 0x31, 0x59, 0x3c, 0x20, 0x86, 0x33, + 0x19, 0x0b, 0x86, 0x16, 0x5a, 0x6c, 0x8e, 0xf1, 0xc9, 0xfe, 0x80, 0xdc, 0x9d, 0x8c, 0xf9, 0xd0, + 0x01, 0x7a, 0x0f, 0x5a, 0xa1, 0x70, 0xf8, 0x1b, 0x8d, 0xf4, 0xca, 0x73, 0x67, 0xca, 0x85, 0xb7, + 0xa4, 0x80, 0xa8, 0x16, 0xfe, 0xea, 0xcb, 0x6d, 0x45, 0x6f, 0x08, 0x7b, 0xe1, 0x1b, 0xed, 0x75, + 0x68, 0xce, 0xac, 0x18, 0x69, 0x50, 0xf7, 0x26, 0x7d, 0xe3, 0x31, 0x99, 0x1a, 0xdc, 0x25, 0x1c, + 0x01, 0x54, 0xbd, 0xe6, 0x4d, 0xfa, 0xef, 0x92, 0xe9, 0x03, 0xd6, 0xa5, 0x99, 0xd0, 0x48, 0x97, + 0xa3, 0xd8, 0xe9, 0xf0, 0xdd, 0x89, 0x63, 0xf1, 0x79, 0xaf, 0xeb, 0xa2, 0x81, 0x6e, 0xc2, 0xfa, + 0xb1, 0x2b, 0xb0, 0x74, 0x19, 0x8a, 0x3c, 0x72, 0x29, 0x49, 0x14, 0xb5, 0x84, 0x8e, 0x76, 0x97, + 0x79, 0x8a, 0x38, 0x74, 0x9f, 0x52, 0xdf, 0xee, 0x4f, 0x28, 0x49, 0x16, 0x57, 0x37, 0xe6, 0x14, + 0x57, 0x23, 0xb6, 0x14, 0x71, 0xad, 0xa2, 0xa8, 0xec, 0xf1, 0x86, 0xf6, 0x23, 0x05, 0xd6, 0xb9, + 0x41, 0x06, 0x99, 0xbc, 0x52, 0x25, 0x79, 0x38, 0x7b, 0x46, 0x26, 0x00, 0x0e, 0x07, 0x0a, 0xe7, + 0x7b, 0x79, 0x19, 0x58, 0x47, 0xd3, 0xea, 0x3e, 0x2f, 0x51, 0x7b, 0x33, 0x36, 0x90, 0x40, 0xee, + 0x84, 0x59, 0xed, 0x9f, 0x25, 0x28, 0x8b, 0x72, 0x20, 0x7a, 0x3b, 0x5d, 0x9c, 0xae, 0xed, 0x6d, + 0x2d, 0x72, 0x8e, 0x90, 0x92, 0xbe, 0x89, 0xb2, 0x83, 0x2b, 0xb3, 0x15, 0xdf, 0x6e, 0xed, 0xf4, + 0xc9, 0x76, 0x85, 0x33, 0xeb, 0xc3, 0x5b, 0x71, 0xf9, 0x77, 0x51, 0xf5, 0x33, 0xac, 0x35, 0x97, + 0x56, 0xae, 0x35, 0xf7, 0xa0, 0x9e, 0x48, 0x25, 0x6c, 0x4b, 0x16, 0x3f, 0xb6, 0x96, 0x1d, 0xe9, + 0xc3, 0x5b, 0x72, 0xfe, 0xb5, 0x28, 0xd5, 0x38, 0xb4, 0xd0, 0xb5, 0x74, 0x11, 0x94, 0x67, 0x24, + 0x82, 0x0a, 0x27, 0xea, 0x9a, 0x2c, 0x1f, 0x61, 0x87, 0x8d, 0x05, 0x36, 0x21, 0x22, 0x98, 0x71, + 0x95, 0x75, 0xf0, 0x97, 0x57, 0xa1, 0x19, 0x93, 0x76, 0x21, 0x52, 0x15, 0x56, 0xe2, 0x6e, 0x2e, + 0x78, 0x03, 0x36, 0x1d, 0x72, 0x42, 0x8d, 0x59, 0x69, 0x95, 0x4b, 0x23, 0xf6, 0xee, 0x51, 0x5a, + 0xe3, 0x32, 0x34, 0x62, 0x7a, 0xc0, 0x65, 0x41, 0x94, 0xa6, 0xa3, 0x5e, 0x2e, 0x96, 0xac, 0xfa, + 0xd5, 0x52, 0x55, 0xbf, 0x28, 0x49, 0x13, 0xf0, 0x2d, 0x8d, 0x6c, 0x70, 0x19, 0x9e, 0xa4, 0x09, + 0xf8, 0x15, 0x66, 0x2e, 0x41, 0x3d, 0xc4, 0x2c, 0x21, 0x57, 0xe7, 0x72, 0x1b, 0x61, 0x27, 0x17, + 0xba, 0x0e, 0x2d, 0xcf, 0x77, 0x3d, 0x37, 0x20, 0xbe, 0x81, 0x2d, 0xcb, 0x27, 0x41, 0xc0, 0x13, + 0xfd, 0x0d, 0xbd, 0x19, 0xf6, 0xef, 0x8b, 0x6e, 0xed, 0x55, 0xa8, 0x84, 0xb9, 0xe2, 0x26, 0xac, + 0x77, 0x23, 0xfc, 0x2d, 0xe9, 0xa2, 0xc1, 0x0e, 0xd4, 0xbe, 0xe7, 0xc9, 0x5f, 0x3f, 0xd8, 0xa3, + 0x36, 0x82, 0x8a, 0xdc, 0xb0, 0xb9, 0x35, 0xef, 0xf7, 0x60, 0xc3, 0xc3, 0x3e, 0x5b, 0x46, 0xb2, + 0xf2, 0xbd, 0xa8, 0xcc, 0x74, 0x0f, 0xfb, 0xf4, 0x3e, 0xa1, 0xa9, 0x02, 0x78, 0x8d, 0xeb, 0x8b, + 0x2e, 0xed, 0x4d, 0xa8, 0xa7, 0x64, 0xd8, 0x34, 0xa9, 0x4b, 0xf1, 0x28, 0x84, 0x11, 0xde, 0x88, + 
0x66, 0x52, 0x88, 0x67, 0xa2, 0xdd, 0x04, 0x35, 0xda, 0x2b, 0x96, 0x44, 0x87, 0xae, 0x50, 0xa4, + 0xfb, 0x45, 0x93, 0x17, 0xf9, 0xdd, 0x4f, 0x65, 0x21, 0xb3, 0xa8, 0x8b, 0x86, 0x46, 0x12, 0xb0, + 0x27, 0x98, 0x1a, 0x7a, 0x0b, 0x2a, 0x12, 0xf6, 0xe4, 0x79, 0x5c, 0x54, 0xce, 0xbf, 0xc7, 0x71, + 0x30, 0x2c, 0xe7, 0x0b, 0x54, 0x8c, 0x87, 0x29, 0x24, 0x87, 0xf9, 0x21, 0x54, 0x43, 0x68, 0x4b, + 0xc7, 0x20, 0x31, 0xc2, 0xc5, 0xac, 0x18, 0x24, 0x07, 0x89, 0x15, 0xd9, 0xd7, 0x14, 0xd8, 0x03, + 0x87, 0x58, 0x46, 0x7c, 0x04, 0xf9, 0x98, 0x55, 0xbd, 0x29, 0x5e, 0xdc, 0x09, 0xcf, 0x97, 0x76, + 0x03, 0xca, 0x62, 0xae, 0x73, 0xf1, 0x6e, 0x0e, 0x6d, 0xd4, 0xfe, 0xa6, 0x40, 0x35, 0x0c, 0x4e, + 0x73, 0x95, 0x52, 0x8b, 0x28, 0x7c, 0xd5, 0x45, 0x3c, 0x7d, 0x48, 0x7a, 0x05, 0x10, 0xff, 0x52, + 0x8c, 0x63, 0x97, 0xda, 0xce, 0xc0, 0x10, 0x7b, 0x21, 0xb2, 0x9c, 0x16, 0x7f, 0xf3, 0x88, 0xbf, + 0xb8, 0xc7, 0xb7, 0xe5, 0xc7, 0x0a, 0x54, 0x23, 0x4e, 0xba, 0x6a, 0xe5, 0xfd, 0x02, 0x94, 0x25, + 0xef, 0x12, 0xa5, 0x77, 0xd9, 0x8a, 0xbe, 0xd1, 0x52, 0xe2, 0xb4, 0x74, 0xa0, 0x3a, 0x26, 0x14, + 0x73, 0x3f, 0x8b, 0x4a, 0x4b, 0xd4, 0x7e, 0xe9, 0x12, 0xd4, 0x12, 0x3f, 0x85, 0xa0, 0x0a, 0x14, + 0xef, 0x92, 0x4f, 0x5b, 0x6b, 0xa8, 0x06, 0x15, 0x9d, 0xf0, 0xea, 0x67, 0x4b, 0xd9, 0xfb, 0xac, + 0x06, 0xcd, 0xfd, 0xee, 0xc1, 0x21, 0xa3, 0x85, 0xb6, 0xc9, 0x43, 0x36, 0x7a, 0x1f, 0x4a, 0xbc, + 0x10, 0x95, 0xe3, 0xe6, 0x45, 0x27, 0x4f, 0x29, 0x1d, 0xe9, 0xb0, 0xce, 0xeb, 0x55, 0x28, 0xcf, + 0x85, 0x8c, 0x4e, 0xae, 0x0a, 0x3b, 0x9b, 0x24, 0xff, 0xea, 0x73, 0xdc, 0xd3, 0xe8, 0xe4, 0x29, + 0xbb, 0xa3, 0x8f, 0x40, 0x8d, 0x0b, 0x51, 0x79, 0x6f, 0x6f, 0x74, 0x72, 0x17, 0xe4, 0x99, 0xfd, + 0x38, 0xf5, 0xce, 0x7b, 0x77, 0xa1, 0x93, 0xbb, 0x12, 0x8d, 0x3e, 0x84, 0x4a, 0x58, 0xe4, 0xc8, + 0x77, 0xbf, 0xa2, 0x93, 0xb3, 0x58, 0xce, 0xb6, 0x4f, 0xd4, 0xa6, 0xf2, 0x5c, 0x22, 0xe9, 0xe4, + 0xfa, 0x45, 0x00, 0x3d, 0x84, 0xb2, 0xcc, 0x2e, 0x73, 0xdd, 0x9c, 0xe8, 0xe4, 0x2b, 0x81, 0x33, + 0x27, 0xc7, 0xd5, 0xbf, 0xbc, 0x17, 0x67, 0x3a, 0xb9, 0x7f, 0x0a, 0x41, 0x18, 0x20, 0x51, 0xb0, + 0xca, 0x7d, 0x23, 0xa6, 0x93, 0xff, 0x27, 0x0e, 0xf4, 0x3d, 0xa8, 0x46, 0x65, 0x89, 0x9c, 0x37, + 0x53, 0x3a, 0x79, 0x7f, 0x65, 0x40, 0x1f, 0x43, 0x3d, 0x9d, 0x89, 0xaf, 0x72, 0xdf, 0xa4, 0xb3, + 0xd2, 0xcf, 0x07, 0x6c, 0xac, 0x74, 0x72, 0xbe, 0xca, 0x2d, 0x94, 0xce, 0x4a, 0xbf, 0x29, 0xa0, + 0x63, 0x38, 0x77, 0x36, 0x9f, 0x5e, 0xf5, 0x6a, 0x4a, 0x67, 0xe5, 0xdf, 0x1a, 0xd0, 0x14, 0xd0, + 0x9c, 0x9c, 0x7c, 0xe5, 0xfb, 0x2a, 0x9d, 0xd5, 0x7f, 0x80, 0xe8, 0x1e, 0xfe, 0xfb, 0x2f, 0x5b, + 0xca, 0x6f, 0x4e, 0xb7, 0x94, 0xcf, 0x4f, 0xb7, 0x94, 0x2f, 0x4e, 0xb7, 0x94, 0x3f, 0x9e, 0x6e, + 0x29, 0x7f, 0x3e, 0xdd, 0x52, 0x7e, 0xff, 0xd7, 0x2d, 0xe5, 0xbb, 0x2f, 0x0f, 0x6c, 0x3a, 0x9c, + 0xf4, 0x77, 0x4c, 0x77, 0xbc, 0x1b, 0x9b, 0x4e, 0x3e, 0xc6, 0x97, 0x04, 0xfb, 0x65, 0x1e, 0x00, + 0x5f, 0xfb, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x3d, 0xc7, 0x11, 0xb1, 0x39, 0x28, 0x00, 0x00, } func (this *Request) Equal(that interface{}) bool { @@ -3483,6 +4217,102 @@ func (this *Request_Commit) Equal(that interface{}) bool { } return true } +func (this *Request_ListSnapshots) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Request_ListSnapshots) + if !ok { + that2, ok := that.(Request_ListSnapshots) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListSnapshots.Equal(that1.ListSnapshots) { + return false + } + return true +} +func (this 
*Request_OfferSnapshot) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Request_OfferSnapshot) + if !ok { + that2, ok := that.(Request_OfferSnapshot) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.OfferSnapshot.Equal(that1.OfferSnapshot) { + return false + } + return true +} +func (this *Request_LoadSnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Request_LoadSnapshotChunk) + if !ok { + that2, ok := that.(Request_LoadSnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.LoadSnapshotChunk.Equal(that1.LoadSnapshotChunk) { + return false + } + return true +} +func (this *Request_ApplySnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Request_ApplySnapshotChunk) + if !ok { + that2, ok := that.(Request_ApplySnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ApplySnapshotChunk.Equal(that1.ApplySnapshotChunk) { + return false + } + return true +} func (this *RequestEcho) Equal(that interface{}) bool { if that == nil { return this == nil @@ -3826,6 +4656,126 @@ func (this *RequestCommit) Equal(that interface{}) bool { } return true } +func (this *RequestListSnapshots) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestListSnapshots) + if !ok { + that2, ok := that.(RequestListSnapshots) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *RequestOfferSnapshot) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestOfferSnapshot) + if !ok { + that2, ok := that.(RequestOfferSnapshot) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Snapshot.Equal(that1.Snapshot) { + return false + } + if !bytes.Equal(this.AppHash, that1.AppHash) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *RequestLoadSnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestLoadSnapshotChunk) + if !ok { + that2, ok := that.(RequestLoadSnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Height != that1.Height { + return false + } + if this.Format != that1.Format { + return false + } + if this.Chunk != that1.Chunk { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *RequestApplySnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*RequestApplySnapshotChunk) + if !ok { + that2, ok := that.(RequestApplySnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 
== nil { + return this == nil + } else if this == nil { + return false + } + if this.Index != that1.Index { + return false + } + if !bytes.Equal(this.Chunk, that1.Chunk) { + return false + } + if this.Sender != that1.Sender { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} func (this *Response) Equal(that interface{}) bool { if that == nil { return this == nil @@ -4147,6 +5097,102 @@ func (this *Response_Commit) Equal(that interface{}) bool { } return true } +func (this *Response_ListSnapshots) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_ListSnapshots) + if !ok { + that2, ok := that.(Response_ListSnapshots) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListSnapshots.Equal(that1.ListSnapshots) { + return false + } + return true +} +func (this *Response_OfferSnapshot) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_OfferSnapshot) + if !ok { + that2, ok := that.(Response_OfferSnapshot) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.OfferSnapshot.Equal(that1.OfferSnapshot) { + return false + } + return true +} +func (this *Response_LoadSnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_LoadSnapshotChunk) + if !ok { + that2, ok := that.(Response_LoadSnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.LoadSnapshotChunk.Equal(that1.LoadSnapshotChunk) { + return false + } + return true +} +func (this *Response_ApplySnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Response_ApplySnapshotChunk) + if !ok { + that2, ok := that.(Response_ApplySnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ApplySnapshotChunk.Equal(that1.ApplySnapshotChunk) { + return false + } + return true +} func (this *ResponseException) Equal(that interface{}) bool { if that == nil { return this == nil @@ -4594,6 +5640,135 @@ func (this *ResponseCommit) Equal(that interface{}) bool { } return true } +func (this *ResponseListSnapshots) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseListSnapshots) + if !ok { + that2, ok := that.(ResponseListSnapshots) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Snapshots) != len(that1.Snapshots) { + return false + } + for i := range this.Snapshots { + if !this.Snapshots[i].Equal(that1.Snapshots[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseOfferSnapshot) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseOfferSnapshot) + if !ok { + that2, ok := that.(ResponseOfferSnapshot) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } 
else if this == nil { + return false + } + if this.Result != that1.Result { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseLoadSnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseLoadSnapshotChunk) + if !ok { + that2, ok := that.(ResponseLoadSnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Chunk, that1.Chunk) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *ResponseApplySnapshotChunk) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResponseApplySnapshotChunk) + if !ok { + that2, ok := that.(ResponseApplySnapshotChunk) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Result != that1.Result { + return false + } + if len(this.RefetchChunks) != len(that1.RefetchChunks) { + return false + } + for i := range this.RefetchChunks { + if this.RefetchChunks[i] != that1.RefetchChunks[i] { + return false + } + } + if len(this.RejectSenders) != len(that1.RejectSenders) { + return false + } + for i := range this.RejectSenders { + if this.RejectSenders[i] != that1.RejectSenders[i] { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} func (this *ConsensusParams) Equal(that interface{}) bool { if that == nil { return this == nil @@ -5137,6 +6312,45 @@ func (this *Evidence) Equal(that interface{}) bool { } return true } +func (this *Snapshot) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Snapshot) + if !ok { + that2, ok := that.(Snapshot) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Height != that1.Height { + return false + } + if this.Format != that1.Format { + return false + } + if this.Chunks != that1.Chunks { + return false + } + if !bytes.Equal(this.Hash, that1.Hash) { + return false + } + if !bytes.Equal(this.Metadata, that1.Metadata) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} // Reference imports to suppress errors if they are not otherwise used. 
var _ context.Context @@ -5161,6 +6375,10 @@ type ABCIApplicationClient interface { InitChain(ctx context.Context, in *RequestInitChain, opts ...grpc.CallOption) (*ResponseInitChain, error) BeginBlock(ctx context.Context, in *RequestBeginBlock, opts ...grpc.CallOption) (*ResponseBeginBlock, error) EndBlock(ctx context.Context, in *RequestEndBlock, opts ...grpc.CallOption) (*ResponseEndBlock, error) + ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) + OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) } type aBCIApplicationClient struct { @@ -5270,6 +6488,42 @@ func (c *aBCIApplicationClient) EndBlock(ctx context.Context, in *RequestEndBloc return out, nil } +func (c *aBCIApplicationClient) ListSnapshots(ctx context.Context, in *RequestListSnapshots, opts ...grpc.CallOption) (*ResponseListSnapshots, error) { + out := new(ResponseListSnapshots) + err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/ListSnapshots", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) OfferSnapshot(ctx context.Context, in *RequestOfferSnapshot, opts ...grpc.CallOption) (*ResponseOfferSnapshot, error) { + out := new(ResponseOfferSnapshot) + err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/OfferSnapshot", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) LoadSnapshotChunk(ctx context.Context, in *RequestLoadSnapshotChunk, opts ...grpc.CallOption) (*ResponseLoadSnapshotChunk, error) { + out := new(ResponseLoadSnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/LoadSnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *aBCIApplicationClient) ApplySnapshotChunk(ctx context.Context, in *RequestApplySnapshotChunk, opts ...grpc.CallOption) (*ResponseApplySnapshotChunk, error) { + out := new(ResponseApplySnapshotChunk) + err := c.cc.Invoke(ctx, "/tendermint.abci.types.ABCIApplication/ApplySnapshotChunk", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // ABCIApplicationServer is the server API for ABCIApplication service. type ABCIApplicationServer interface { Echo(context.Context, *RequestEcho) (*ResponseEcho, error) @@ -5283,6 +6537,10 @@ type ABCIApplicationServer interface { InitChain(context.Context, *RequestInitChain) (*ResponseInitChain, error) BeginBlock(context.Context, *RequestBeginBlock) (*ResponseBeginBlock, error) EndBlock(context.Context, *RequestEndBlock) (*ResponseEndBlock, error) + ListSnapshots(context.Context, *RequestListSnapshots) (*ResponseListSnapshots, error) + OfferSnapshot(context.Context, *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) + LoadSnapshotChunk(context.Context, *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) + ApplySnapshotChunk(context.Context, *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) } // UnimplementedABCIApplicationServer can be embedded to have forward compatible implementations. 
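// Editorial sketch -- not part of the generated diff. The hunks above add four
// unary snapshot RPCs to the ABCIApplication gRPC service, and the next hunk
// stubs them out on UnimplementedABCIApplicationServer. The example below is a
// minimal, hypothetical way a standalone gRPC application could serve them.
// Only the request/response types, the UnimplementedABCIApplicationServer
// embedding, and RegisterABCIApplicationServer come from this file; the
// in-memory snapshot store, the chunk lookup scheme, and the listen address
// are illustrative assumptions.

package main

import (
	"context"
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/tendermint/tendermint/abci/types"
)

// snapshotApp overrides only the snapshot RPCs; every other ABCI method falls
// back to the embedded UnimplementedABCIApplicationServer and returns
// codes.Unimplemented, as generated below.
type snapshotApp struct {
	types.UnimplementedABCIApplicationServer
	snapshots []*types.Snapshot   // metadata for snapshots this node can serve (assumed store)
	chunks    map[uint64][][]byte // height -> ordered chunk payloads (assumed store)
}

// ListSnapshots advertises all locally available snapshots to the caller.
func (a *snapshotApp) ListSnapshots(ctx context.Context, req *types.RequestListSnapshots) (*types.ResponseListSnapshots, error) {
	return &types.ResponseListSnapshots{Snapshots: a.snapshots}, nil
}

// LoadSnapshotChunk returns the chunk at index req.Chunk for req.Height; this
// sketch keeps a single format per height, so req.Format is not consulted.
func (a *snapshotApp) LoadSnapshotChunk(ctx context.Context, req *types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) {
	cs := a.chunks[req.Height]
	if int(req.Chunk) >= len(cs) {
		return &types.ResponseLoadSnapshotChunk{}, nil // unknown chunk: empty response
	}
	return &types.ResponseLoadSnapshotChunk{Chunk: cs[req.Chunk]}, nil
}

// OfferSnapshot decides whether to restore from the offered snapshot. A real
// application would inspect req.Snapshot and req.AppHash and set Result from
// the ResponseOfferSnapshot_Result enum registered earlier in this file; the
// zero-valued response is returned here to avoid assuming enum constant names.
func (a *snapshotApp) OfferSnapshot(ctx context.Context, req *types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) {
	return &types.ResponseOfferSnapshot{}, nil
}

// ApplySnapshotChunk would persist req.Chunk at req.Index; on verification
// failure, RefetchChunks and RejectSenders let the application retry chunks
// or refuse further data from a misbehaving peer (req.Sender).
func (a *snapshotApp) ApplySnapshotChunk(ctx context.Context, req *types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) {
	return &types.ResponseApplySnapshotChunk{}, nil
}

func main() {
	lis, err := net.Listen("tcp", "127.0.0.1:26658") // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	types.RegisterABCIApplicationServer(srv, &snapshotApp{chunks: map[uint64][][]byte{}})
	log.Fatal(srv.Serve(lis))
}

// Note that _ABCIApplication_serviceDesc below registers these as plain unary
// methods (its Streams list stays empty), so snapshot transfer is chunked at
// the protocol level -- via Snapshot.Chunks and repeated LoadSnapshotChunk
// calls -- rather than with gRPC streaming.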
@@ -5322,6 +6580,18 @@ func (*UnimplementedABCIApplicationServer) BeginBlock(ctx context.Context, req * func (*UnimplementedABCIApplicationServer) EndBlock(ctx context.Context, req *RequestEndBlock) (*ResponseEndBlock, error) { return nil, status.Errorf(codes.Unimplemented, "method EndBlock not implemented") } +func (*UnimplementedABCIApplicationServer) ListSnapshots(ctx context.Context, req *RequestListSnapshots) (*ResponseListSnapshots, error) { + return nil, status.Errorf(codes.Unimplemented, "method ListSnapshots not implemented") +} +func (*UnimplementedABCIApplicationServer) OfferSnapshot(ctx context.Context, req *RequestOfferSnapshot) (*ResponseOfferSnapshot, error) { + return nil, status.Errorf(codes.Unimplemented, "method OfferSnapshot not implemented") +} +func (*UnimplementedABCIApplicationServer) LoadSnapshotChunk(ctx context.Context, req *RequestLoadSnapshotChunk) (*ResponseLoadSnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method LoadSnapshotChunk not implemented") +} +func (*UnimplementedABCIApplicationServer) ApplySnapshotChunk(ctx context.Context, req *RequestApplySnapshotChunk) (*ResponseApplySnapshotChunk, error) { + return nil, status.Errorf(codes.Unimplemented, "method ApplySnapshotChunk not implemented") +} func RegisterABCIApplicationServer(s *grpc.Server, srv ABCIApplicationServer) { s.RegisterService(&_ABCIApplication_serviceDesc, srv) @@ -5525,6 +6795,78 @@ func _ABCIApplication_EndBlock_Handler(srv interface{}, ctx context.Context, dec return interceptor(ctx, in, info, handler) } +func _ABCIApplication_ListSnapshots_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestListSnapshots) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.types.ABCIApplication/ListSnapshots", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ListSnapshots(ctx, req.(*RequestListSnapshots)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_OfferSnapshot_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestOfferSnapshot) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.types.ABCIApplication/OfferSnapshot", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).OfferSnapshot(ctx, req.(*RequestOfferSnapshot)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_LoadSnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestLoadSnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.types.ABCIApplication/LoadSnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(ABCIApplicationServer).LoadSnapshotChunk(ctx, req.(*RequestLoadSnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + +func _ABCIApplication_ApplySnapshotChunk_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(RequestApplySnapshotChunk) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/tendermint.abci.types.ABCIApplication/ApplySnapshotChunk", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ABCIApplicationServer).ApplySnapshotChunk(ctx, req.(*RequestApplySnapshotChunk)) + } + return interceptor(ctx, in, info, handler) +} + var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ ServiceName: "tendermint.abci.types.ABCIApplication", HandlerType: (*ABCIApplicationServer)(nil), @@ -5573,6 +6915,22 @@ var _ABCIApplication_serviceDesc = grpc.ServiceDesc{ MethodName: "EndBlock", Handler: _ABCIApplication_EndBlock_Handler, }, + { + MethodName: "ListSnapshots", + Handler: _ABCIApplication_ListSnapshots_Handler, + }, + { + MethodName: "OfferSnapshot", + Handler: _ABCIApplication_OfferSnapshot_Handler, + }, + { + MethodName: "LoadSnapshotChunk", + Handler: _ABCIApplication_LoadSnapshotChunk_Handler, + }, + { + MethodName: "ApplySnapshotChunk", + Handler: _ABCIApplication_ApplySnapshotChunk_Handler, + }, }, Streams: []grpc.StreamDesc{}, Metadata: "abci/types/types.proto", @@ -5824,6 +7182,92 @@ func (m *Request_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *Request_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Request_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Request_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Request_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Request_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := 
m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} func (m *Request_DeliverTx) MarshalTo(dAtA []byte) (int, error) { size := m.Size() return m.MarshalToSizedBuffer(dAtA[:size]) @@ -6057,12 +7501,12 @@ func (m *RequestInitChain) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x12 } - n13, err13 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err13 != nil { - return 0, err13 + n17, err17 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err17 != nil { + return 0, err17 } - i -= n13 - i = encodeVarintTypes(dAtA, i, uint64(n13)) + i -= n17 + i = encodeVarintTypes(dAtA, i, uint64(n17)) i-- dAtA[i] = 0xa return len(dAtA) - i, nil @@ -6324,6 +7768,167 @@ func (m *RequestCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *RequestListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + +func (m *RequestOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.AppHash) > 0 { + i -= len(m.AppHash) + copy(dAtA[i:], m.AppHash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.AppHash))) + i-- + dAtA[i] = 0x12 + } + if m.Snapshot != nil { + { + size, err := m.Snapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *RequestLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Chunk != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Chunk)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 
0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *RequestApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *RequestApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *RequestApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Sender) > 0 { + i -= len(m.Sender) + copy(dAtA[i:], m.Sender) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Sender))) + i-- + dAtA[i] = 0x1a + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0x12 + } + if m.Index != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Index)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *Response) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -6612,6 +8217,92 @@ func (m *Response_Commit) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *Response_ListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ListSnapshots != nil { + { + size, err := m.ListSnapshots.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x6a + } + return len(dAtA) - i, nil +} +func (m *Response_OfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_OfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.OfferSnapshot != nil { + { + size, err := m.OfferSnapshot.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x72 + } + return len(dAtA) - i, nil +} +func (m *Response_LoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_LoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.LoadSnapshotChunk != nil { + { + size, err := m.LoadSnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x7a + } + return len(dAtA) - i, nil +} +func (m *Response_ApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Response_ApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.ApplySnapshotChunk != nil { + { + size, err := m.ApplySnapshotChunk.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x82 + } + return len(dAtA) - i, nil +} func (m *ResponseException) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = 
make([]byte, size) @@ -7268,6 +8959,172 @@ func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ResponseListSnapshots) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseListSnapshots) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseListSnapshots) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Snapshots) > 0 { + for iNdEx := len(m.Snapshots) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Snapshots[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintTypes(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ResponseOfferSnapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseOfferSnapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseOfferSnapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *ResponseLoadSnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseLoadSnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseLoadSnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Chunk) > 0 { + i -= len(m.Chunk) + copy(dAtA[i:], m.Chunk) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Chunk))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ResponseApplySnapshotChunk) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResponseApplySnapshotChunk) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResponseApplySnapshotChunk) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.RejectSenders) > 0 { + for iNdEx := len(m.RejectSenders) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.RejectSenders[iNdEx]) + copy(dAtA[i:], m.RejectSenders[iNdEx]) + i = encodeVarintTypes(dAtA, i, uint64(len(m.RejectSenders[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.RefetchChunks) > 0 { + dAtA41 := 
make([]byte, len(m.RefetchChunks)*10) + var j40 int + for _, num := range m.RefetchChunks { + for num >= 1<<7 { + dAtA41[j40] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j40++ + } + dAtA41[j40] = uint8(num) + j40++ + } + i -= j40 + copy(dAtA[i:], dAtA41[:j40]) + i = encodeVarintTypes(dAtA, i, uint64(j40)) + i-- + dAtA[i] = 0x12 + } + if m.Result != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Result)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *ConsensusParams) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -7392,12 +9249,12 @@ func (m *EvidenceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } - n34, err34 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) - if err34 != nil { - return 0, err34 + n45, err45 := github_com_gogo_protobuf_types.StdDurationMarshalTo(m.MaxAgeDuration, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdDuration(m.MaxAgeDuration):]) + if err45 != nil { + return 0, err45 } - i -= n34 - i = encodeVarintTypes(dAtA, i, uint64(n34)) + i -= n45 + i = encodeVarintTypes(dAtA, i, uint64(n45)) i-- dAtA[i] = 0x12 if m.MaxAgeNumBlocks != 0 { @@ -7686,12 +9543,12 @@ func (m *Header) MarshalToSizedBuffer(dAtA []byte) (int, error) { } i-- dAtA[i] = 0x2a - n36, err36 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err36 != nil { - return 0, err36 + n47, err47 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err47 != nil { + return 0, err47 } - i -= n36 - i = encodeVarintTypes(dAtA, i, uint64(n36)) + i -= n47 + i = encodeVarintTypes(dAtA, i, uint64(n47)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -8037,12 +9894,12 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x28 } - n41, err41 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) - if err41 != nil { - return 0, err41 + n52, err52 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Time):]) + if err52 != nil { + return 0, err52 } - i -= n41 - i = encodeVarintTypes(dAtA, i, uint64(n41)) + i -= n52 + i = encodeVarintTypes(dAtA, i, uint64(n52)) i-- dAtA[i] = 0x22 if m.Height != 0 { @@ -8070,6 +9927,62 @@ func (m *Evidence) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *Snapshot) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Snapshot) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Snapshot) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Metadata) > 0 { + i -= len(m.Metadata) + copy(dAtA[i:], m.Metadata) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Metadata))) + i-- + dAtA[i] = 0x2a + } + if len(m.Hash) > 0 { + i -= len(m.Hash) + copy(dAtA[i:], m.Hash) + i = encodeVarintTypes(dAtA, i, uint64(len(m.Hash))) + i-- + dAtA[i] = 0x22 + } + if m.Chunks != 0 { + i = 
encodeVarintTypes(dAtA, i, uint64(m.Chunks)) + i-- + dAtA[i] = 0x18 + } + if m.Format != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Format)) + i-- + dAtA[i] = 0x10 + } + if m.Height != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.Height)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { offset -= sovTypes(v) base := offset @@ -8083,7 +9996,7 @@ func encodeVarintTypes(dAtA []byte, offset int, v uint64) int { } func NewPopulatedRequest(r randyTypes, easy bool) *Request { this := &Request{} - oneofNumber_Value := []int32{2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 19}[r.Intn(11)] + oneofNumber_Value := []int32{2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 13, 14, 15, 16, 19}[r.Intn(15)] switch oneofNumber_Value { case 2: this.Value = NewPopulatedRequest_Echo(r, easy) @@ -8105,6 +10018,14 @@ func NewPopulatedRequest(r randyTypes, easy bool) *Request { this.Value = NewPopulatedRequest_EndBlock(r, easy) case 12: this.Value = NewPopulatedRequest_Commit(r, easy) + case 13: + this.Value = NewPopulatedRequest_ListSnapshots(r, easy) + case 14: + this.Value = NewPopulatedRequest_OfferSnapshot(r, easy) + case 15: + this.Value = NewPopulatedRequest_LoadSnapshotChunk(r, easy) + case 16: + this.Value = NewPopulatedRequest_ApplySnapshotChunk(r, easy) case 19: this.Value = NewPopulatedRequest_DeliverTx(r, easy) } @@ -8164,6 +10085,26 @@ func NewPopulatedRequest_Commit(r randyTypes, easy bool) *Request_Commit { this.Commit = NewPopulatedRequestCommit(r, easy) return this } +func NewPopulatedRequest_ListSnapshots(r randyTypes, easy bool) *Request_ListSnapshots { + this := &Request_ListSnapshots{} + this.ListSnapshots = NewPopulatedRequestListSnapshots(r, easy) + return this +} +func NewPopulatedRequest_OfferSnapshot(r randyTypes, easy bool) *Request_OfferSnapshot { + this := &Request_OfferSnapshot{} + this.OfferSnapshot = NewPopulatedRequestOfferSnapshot(r, easy) + return this +} +func NewPopulatedRequest_LoadSnapshotChunk(r randyTypes, easy bool) *Request_LoadSnapshotChunk { + this := &Request_LoadSnapshotChunk{} + this.LoadSnapshotChunk = NewPopulatedRequestLoadSnapshotChunk(r, easy) + return this +} +func NewPopulatedRequest_ApplySnapshotChunk(r randyTypes, easy bool) *Request_ApplySnapshotChunk { + this := &Request_ApplySnapshotChunk{} + this.ApplySnapshotChunk = NewPopulatedRequestApplySnapshotChunk(r, easy) + return this +} func NewPopulatedRequest_DeliverTx(r randyTypes, easy bool) *Request_DeliverTx { this := &Request_DeliverTx{} this.DeliverTx = NewPopulatedRequestDeliverTx(r, easy) @@ -8325,9 +10266,59 @@ func NewPopulatedRequestCommit(r randyTypes, easy bool) *RequestCommit { return this } +func NewPopulatedRequestListSnapshots(r randyTypes, easy bool) *RequestListSnapshots { + this := &RequestListSnapshots{} + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 1) + } + return this +} + +func NewPopulatedRequestOfferSnapshot(r randyTypes, easy bool) *RequestOfferSnapshot { + this := &RequestOfferSnapshot{} + if r.Intn(5) != 0 { + this.Snapshot = NewPopulatedSnapshot(r, easy) + } + v13 := r.Intn(100) + this.AppHash = make([]byte, v13) + for i := 0; i < v13; i++ { + this.AppHash[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + } + return this +} + +func NewPopulatedRequestLoadSnapshotChunk(r randyTypes, easy bool) *RequestLoadSnapshotChunk { + this := &RequestLoadSnapshotChunk{} + this.Height = uint64(uint64(r.Uint32())) + this.Format = uint32(r.Uint32()) 
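+ // Annotation (not generated output): the NewPopulated* helpers are gogoproto
+ // fuzzers used by the marshal/unmarshal round-trip tests; Height is drawn from
+ // Uint32 (despite the field being uint64 on the wire) to keep test values small.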
+ this.Chunk = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + } + return this +} + +func NewPopulatedRequestApplySnapshotChunk(r randyTypes, easy bool) *RequestApplySnapshotChunk { + this := &RequestApplySnapshotChunk{} + this.Index = uint32(r.Uint32()) + v14 := r.Intn(100) + this.Chunk = make([]byte, v14) + for i := 0; i < v14; i++ { + this.Chunk[i] = byte(r.Intn(256)) + } + this.Sender = string(randStringTypes(r)) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + } + return this +} + func NewPopulatedResponse(r randyTypes, easy bool) *Response { this := &Response{} - oneofNumber_Value := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}[r.Intn(12)] + oneofNumber_Value := []int32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}[r.Intn(16)] switch oneofNumber_Value { case 1: this.Value = NewPopulatedResponse_Exception(r, easy) @@ -8353,9 +10344,17 @@ func NewPopulatedResponse(r randyTypes, easy bool) *Response { this.Value = NewPopulatedResponse_EndBlock(r, easy) case 12: this.Value = NewPopulatedResponse_Commit(r, easy) + case 13: + this.Value = NewPopulatedResponse_ListSnapshots(r, easy) + case 14: + this.Value = NewPopulatedResponse_OfferSnapshot(r, easy) + case 15: + this.Value = NewPopulatedResponse_LoadSnapshotChunk(r, easy) + case 16: + this.Value = NewPopulatedResponse_ApplySnapshotChunk(r, easy) } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 13) + this.XXX_unrecognized = randUnrecognizedTypes(r, 17) } return this } @@ -8420,6 +10419,26 @@ func NewPopulatedResponse_Commit(r randyTypes, easy bool) *Response_Commit { this.Commit = NewPopulatedResponseCommit(r, easy) return this } +func NewPopulatedResponse_ListSnapshots(r randyTypes, easy bool) *Response_ListSnapshots { + this := &Response_ListSnapshots{} + this.ListSnapshots = NewPopulatedResponseListSnapshots(r, easy) + return this +} +func NewPopulatedResponse_OfferSnapshot(r randyTypes, easy bool) *Response_OfferSnapshot { + this := &Response_OfferSnapshot{} + this.OfferSnapshot = NewPopulatedResponseOfferSnapshot(r, easy) + return this +} +func NewPopulatedResponse_LoadSnapshotChunk(r randyTypes, easy bool) *Response_LoadSnapshotChunk { + this := &Response_LoadSnapshotChunk{} + this.LoadSnapshotChunk = NewPopulatedResponseLoadSnapshotChunk(r, easy) + return this +} +func NewPopulatedResponse_ApplySnapshotChunk(r randyTypes, easy bool) *Response_ApplySnapshotChunk { + this := &Response_ApplySnapshotChunk{} + this.ApplySnapshotChunk = NewPopulatedResponseApplySnapshotChunk(r, easy) + return this +} func NewPopulatedResponseException(r randyTypes, easy bool) *ResponseException { this := &ResponseException{} this.Error = string(randStringTypes(r)) @@ -8455,9 +10474,9 @@ func NewPopulatedResponseInfo(r randyTypes, easy bool) *ResponseInfo { if r.Intn(2) == 0 { this.LastBlockHeight *= -1 } - v13 := r.Intn(100) - this.LastBlockAppHash = make([]byte, v13) - for i := 0; i < v13; i++ { + v15 := r.Intn(100) + this.LastBlockAppHash = make([]byte, v15) + for i := 0; i < v15; i++ { this.LastBlockAppHash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8483,11 +10502,11 @@ func NewPopulatedResponseInitChain(r randyTypes, easy bool) *ResponseInitChain { this.ConsensusParams = NewPopulatedConsensusParams(r, easy) } if r.Intn(5) != 0 { - v14 := r.Intn(5) - this.Validators = make([]ValidatorUpdate, v14) - for i := 0; i < v14; i++ { - v15 := NewPopulatedValidatorUpdate(r, easy) - this.Validators[i] = 
*v15 + v16 := r.Intn(5) + this.Validators = make([]ValidatorUpdate, v16) + for i := 0; i < v16; i++ { + v17 := NewPopulatedValidatorUpdate(r, easy) + this.Validators[i] = *v17 } } if !easy && r.Intn(10) != 0 { @@ -8505,14 +10524,14 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { if r.Intn(2) == 0 { this.Index *= -1 } - v16 := r.Intn(100) - this.Key = make([]byte, v16) - for i := 0; i < v16; i++ { + v18 := r.Intn(100) + this.Key = make([]byte, v18) + for i := 0; i < v18; i++ { this.Key[i] = byte(r.Intn(256)) } - v17 := r.Intn(100) - this.Value = make([]byte, v17) - for i := 0; i < v17; i++ { + v19 := r.Intn(100) + this.Value = make([]byte, v19) + for i := 0; i < v19; i++ { this.Value[i] = byte(r.Intn(256)) } if r.Intn(5) != 0 { @@ -8532,11 +10551,11 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { this := &ResponseBeginBlock{} if r.Intn(5) != 0 { - v18 := r.Intn(5) - this.Events = make([]Event, v18) - for i := 0; i < v18; i++ { - v19 := NewPopulatedEvent(r, easy) - this.Events[i] = *v19 + v20 := r.Intn(5) + this.Events = make([]Event, v20) + for i := 0; i < v20; i++ { + v21 := NewPopulatedEvent(r, easy) + this.Events[i] = *v21 } } if !easy && r.Intn(10) != 0 { @@ -8548,9 +10567,9 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this := &ResponseCheckTx{} this.Code = uint32(r.Uint32()) - v20 := r.Intn(100) - this.Data = make([]byte, v20) - for i := 0; i < v20; i++ { + v22 := r.Intn(100) + this.Data = make([]byte, v22) + for i := 0; i < v22; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -8564,11 +10583,11 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.GasUsed *= -1 } if r.Intn(5) != 0 { - v21 := r.Intn(5) - this.Events = make([]Event, v21) - for i := 0; i < v21; i++ { - v22 := NewPopulatedEvent(r, easy) - this.Events[i] = *v22 + v23 := r.Intn(5) + this.Events = make([]Event, v23) + for i := 0; i < v23; i++ { + v24 := NewPopulatedEvent(r, easy) + this.Events[i] = *v24 } } this.Codespace = string(randStringTypes(r)) @@ -8581,9 +10600,9 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this := &ResponseDeliverTx{} this.Code = uint32(r.Uint32()) - v23 := r.Intn(100) - this.Data = make([]byte, v23) - for i := 0; i < v23; i++ { + v25 := r.Intn(100) + this.Data = make([]byte, v25) + for i := 0; i < v25; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -8597,11 +10616,11 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.GasUsed *= -1 } if r.Intn(5) != 0 { - v24 := r.Intn(5) - this.Events = make([]Event, v24) - for i := 0; i < v24; i++ { - v25 := NewPopulatedEvent(r, easy) - this.Events[i] = *v25 + v26 := r.Intn(5) + this.Events = make([]Event, v26) + for i := 0; i < v26; i++ { + v27 := NewPopulatedEvent(r, easy) + this.Events[i] = *v27 } } this.Codespace = string(randStringTypes(r)) @@ -8614,22 +10633,22 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(5) != 0 { - v26 := r.Intn(5) - this.ValidatorUpdates = make([]ValidatorUpdate, v26) - for i := 0; i < 
v26; i++ { - v27 := NewPopulatedValidatorUpdate(r, easy) - this.ValidatorUpdates[i] = *v27 + v28 := r.Intn(5) + this.ValidatorUpdates = make([]ValidatorUpdate, v28) + for i := 0; i < v28; i++ { + v29 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v29 } } if r.Intn(5) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } if r.Intn(5) != 0 { - v28 := r.Intn(5) - this.Events = make([]Event, v28) - for i := 0; i < v28; i++ { - v29 := NewPopulatedEvent(r, easy) - this.Events[i] = *v29 + v30 := r.Intn(5) + this.Events = make([]Event, v30) + for i := 0; i < v30; i++ { + v31 := NewPopulatedEvent(r, easy) + this.Events[i] = *v31 } } if !easy && r.Intn(10) != 0 { @@ -8640,9 +10659,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { this := &ResponseCommit{} - v30 := r.Intn(100) - this.Data = make([]byte, v30) - for i := 0; i < v30; i++ { + v32 := r.Intn(100) + this.Data = make([]byte, v32) + for i := 0; i < v32; i++ { this.Data[i] = byte(r.Intn(256)) } this.RetainHeight = int64(r.Int63()) @@ -8655,6 +10674,62 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { return this } +func NewPopulatedResponseListSnapshots(r randyTypes, easy bool) *ResponseListSnapshots { + this := &ResponseListSnapshots{} + if r.Intn(5) != 0 { + v33 := r.Intn(5) + this.Snapshots = make([]*Snapshot, v33) + for i := 0; i < v33; i++ { + this.Snapshots[i] = NewPopulatedSnapshot(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + } + return this +} + +func NewPopulatedResponseOfferSnapshot(r randyTypes, easy bool) *ResponseOfferSnapshot { + this := &ResponseOfferSnapshot{} + this.Result = ResponseOfferSnapshot_Result([]int32{0, 1, 2, 3, 4}[r.Intn(5)]) + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + } + return this +} + +func NewPopulatedResponseLoadSnapshotChunk(r randyTypes, easy bool) *ResponseLoadSnapshotChunk { + this := &ResponseLoadSnapshotChunk{} + v34 := r.Intn(100) + this.Chunk = make([]byte, v34) + for i := 0; i < v34; i++ { + this.Chunk[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 2) + } + return this +} + +func NewPopulatedResponseApplySnapshotChunk(r randyTypes, easy bool) *ResponseApplySnapshotChunk { + this := &ResponseApplySnapshotChunk{} + this.Result = ResponseApplySnapshotChunk_Result([]int32{0, 1, 2, 3, 4}[r.Intn(5)]) + v35 := r.Intn(10) + this.RefetchChunks = make([]uint32, v35) + for i := 0; i < v35; i++ { + this.RefetchChunks[i] = uint32(r.Uint32()) + } + v36 := r.Intn(10) + this.RejectSenders = make([]string, v36) + for i := 0; i < v36; i++ { + this.RejectSenders[i] = string(randStringTypes(r)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) + } + return this +} + func NewPopulatedConsensusParams(r randyTypes, easy bool) *ConsensusParams { this := &ConsensusParams{} if r.Intn(5) != 0 { @@ -8694,8 +10769,8 @@ func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { if r.Intn(2) == 0 { this.MaxAgeNumBlocks *= -1 } - v31 := github_com_gogo_protobuf_types.NewPopulatedStdDuration(r, easy) - this.MaxAgeDuration = *v31 + v37 := github_com_gogo_protobuf_types.NewPopulatedStdDuration(r, easy) + this.MaxAgeDuration = *v37 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -8704,9 +10779,9 @@ 
func NewPopulatedEvidenceParams(r randyTypes, easy bool) *EvidenceParams { func NewPopulatedValidatorParams(r randyTypes, easy bool) *ValidatorParams { this := &ValidatorParams{} - v32 := r.Intn(10) - this.PubKeyTypes = make([]string, v32) - for i := 0; i < v32; i++ { + v38 := r.Intn(10) + this.PubKeyTypes = make([]string, v38) + for i := 0; i < v38; i++ { this.PubKeyTypes[i] = string(randStringTypes(r)) } if !easy && r.Intn(10) != 0 { @@ -8722,11 +10797,11 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this.Round *= -1 } if r.Intn(5) != 0 { - v33 := r.Intn(5) - this.Votes = make([]VoteInfo, v33) - for i := 0; i < v33; i++ { - v34 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v34 + v39 := r.Intn(5) + this.Votes = make([]VoteInfo, v39) + for i := 0; i < v39; i++ { + v40 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v40 } } if !easy && r.Intn(10) != 0 { @@ -8737,14 +10812,14 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { func NewPopulatedEventAttribute(r randyTypes, easy bool) *EventAttribute { this := &EventAttribute{} - v35 := r.Intn(100) - this.Key = make([]byte, v35) - for i := 0; i < v35; i++ { + v41 := r.Intn(100) + this.Key = make([]byte, v41) + for i := 0; i < v41; i++ { this.Key[i] = byte(r.Intn(256)) } - v36 := r.Intn(100) - this.Value = make([]byte, v36) - for i := 0; i < v36; i++ { + v42 := r.Intn(100) + this.Value = make([]byte, v42) + for i := 0; i < v42; i++ { this.Value[i] = byte(r.Intn(256)) } this.Index = bool(bool(r.Intn(2) == 0)) @@ -8758,11 +10833,11 @@ func NewPopulatedEvent(r randyTypes, easy bool) *Event { this := &Event{} this.Type = string(randStringTypes(r)) if r.Intn(5) != 0 { - v37 := r.Intn(5) - this.Attributes = make([]EventAttribute, v37) - for i := 0; i < v37; i++ { - v38 := NewPopulatedEventAttribute(r, easy) - this.Attributes[i] = *v38 + v43 := r.Intn(5) + this.Attributes = make([]EventAttribute, v43) + for i := 0; i < v43; i++ { + v44 := NewPopulatedEventAttribute(r, easy) + this.Attributes[i] = *v44 } } if !easy && r.Intn(10) != 0 { @@ -8773,60 +10848,60 @@ func NewPopulatedEvent(r randyTypes, easy bool) *Event { func NewPopulatedHeader(r randyTypes, easy bool) *Header { this := &Header{} - v39 := NewPopulatedVersion(r, easy) - this.Version = *v39 + v45 := NewPopulatedVersion(r, easy) + this.Version = *v45 this.ChainID = string(randStringTypes(r)) this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v40 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v40 - v41 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v41 - v42 := r.Intn(100) - this.LastCommitHash = make([]byte, v42) - for i := 0; i < v42; i++ { + v46 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v46 + v47 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v47 + v48 := r.Intn(100) + this.LastCommitHash = make([]byte, v48) + for i := 0; i < v48; i++ { this.LastCommitHash[i] = byte(r.Intn(256)) } - v43 := r.Intn(100) - this.DataHash = make([]byte, v43) - for i := 0; i < v43; i++ { + v49 := r.Intn(100) + this.DataHash = make([]byte, v49) + for i := 0; i < v49; i++ { this.DataHash[i] = byte(r.Intn(256)) } - v44 := r.Intn(100) - this.ValidatorsHash = make([]byte, v44) - for i := 0; i < v44; i++ { + v50 := r.Intn(100) + this.ValidatorsHash = make([]byte, v50) + for i := 0; i < v50; i++ { this.ValidatorsHash[i] = byte(r.Intn(256)) } - v45 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v45) - for i := 0; i < v45; i++ { + v51 := 
r.Intn(100) + this.NextValidatorsHash = make([]byte, v51) + for i := 0; i < v51; i++ { this.NextValidatorsHash[i] = byte(r.Intn(256)) } - v46 := r.Intn(100) - this.ConsensusHash = make([]byte, v46) - for i := 0; i < v46; i++ { + v52 := r.Intn(100) + this.ConsensusHash = make([]byte, v52) + for i := 0; i < v52; i++ { this.ConsensusHash[i] = byte(r.Intn(256)) } - v47 := r.Intn(100) - this.AppHash = make([]byte, v47) - for i := 0; i < v47; i++ { + v53 := r.Intn(100) + this.AppHash = make([]byte, v53) + for i := 0; i < v53; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v48 := r.Intn(100) - this.LastResultsHash = make([]byte, v48) - for i := 0; i < v48; i++ { + v54 := r.Intn(100) + this.LastResultsHash = make([]byte, v54) + for i := 0; i < v54; i++ { this.LastResultsHash[i] = byte(r.Intn(256)) } - v49 := r.Intn(100) - this.EvidenceHash = make([]byte, v49) - for i := 0; i < v49; i++ { + v55 := r.Intn(100) + this.EvidenceHash = make([]byte, v55) + for i := 0; i < v55; i++ { this.EvidenceHash[i] = byte(r.Intn(256)) } - v50 := r.Intn(100) - this.ProposerAddress = make([]byte, v50) - for i := 0; i < v50; i++ { + v56 := r.Intn(100) + this.ProposerAddress = make([]byte, v56) + for i := 0; i < v56; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8847,13 +10922,13 @@ func NewPopulatedVersion(r randyTypes, easy bool) *Version { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v51 := r.Intn(100) - this.Hash = make([]byte, v51) - for i := 0; i < v51; i++ { + v57 := r.Intn(100) + this.Hash = make([]byte, v57) + for i := 0; i < v57; i++ { this.Hash[i] = byte(r.Intn(256)) } - v52 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v52 + v58 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v58 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -8866,9 +10941,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v53 := r.Intn(100) - this.Hash = make([]byte, v53) - for i := 0; i < v53; i++ { + v59 := r.Intn(100) + this.Hash = make([]byte, v59) + for i := 0; i < v59; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8879,9 +10954,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v54 := r.Intn(100) - this.Address = make([]byte, v54) - for i := 0; i < v54; i++ { + v60 := r.Intn(100) + this.Address = make([]byte, v60) + for i := 0; i < v60; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -8896,8 +10971,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v55 := NewPopulatedPubKey(r, easy) - this.PubKey = *v55 + v61 := NewPopulatedPubKey(r, easy) + this.PubKey = *v61 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ -8910,8 +10985,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v56 := NewPopulatedValidator(r, easy) - this.Validator = *v56 + v62 := NewPopulatedValidator(r, easy) + this.Validator = *v62 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -8922,9 +10997,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) 
*VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v57 := r.Intn(100) - this.Data = make([]byte, v57) - for i := 0; i < v57; i++ { + v63 := r.Intn(100) + this.Data = make([]byte, v63) + for i := 0; i < v63; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -8936,14 +11011,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v58 := NewPopulatedValidator(r, easy) - this.Validator = *v58 + v64 := NewPopulatedValidator(r, easy) + this.Validator = *v64 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v59 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v59 + v65 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v65 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -8954,6 +11029,27 @@ func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { return this } +func NewPopulatedSnapshot(r randyTypes, easy bool) *Snapshot { + this := &Snapshot{} + this.Height = uint64(uint64(r.Uint32())) + this.Format = uint32(r.Uint32()) + this.Chunks = uint32(r.Uint32()) + v66 := r.Intn(100) + this.Hash = make([]byte, v66) + for i := 0; i < v66; i++ { + this.Hash[i] = byte(r.Intn(256)) + } + v67 := r.Intn(100) + this.Metadata = make([]byte, v67) + for i := 0; i < v67; i++ { + this.Metadata[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedTypes(r, 6) + } + return this +} + type randyTypes interface { Float32() float32 Float64() float64 @@ -8973,9 +11069,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v60 := r.Intn(100) - tmps := make([]rune, v60) - for i := 0; i < v60; i++ { + v68 := r.Intn(100) + tmps := make([]rune, v68) + for i := 0; i < v68; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8997,11 +11093,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v61 := r.Int63() + v69 := r.Int63() if r.Intn(2) == 0 { - v61 *= -1 + v69 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v61)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v69)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -9161,6 +11257,54 @@ func (m *Request_Commit) Size() (n int) { } return n } +func (m *Request_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Request_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if 
m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *Request_DeliverTx) Size() (n int) { if m == nil { return 0 @@ -9389,6 +11533,82 @@ func (m *RequestCommit) Size() (n int) { return n } +func (m *RequestListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Snapshot != nil { + l = m.Snapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.AppHash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunk != 0 { + n += 1 + sovTypes(uint64(m.Chunk)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *RequestApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Index != 0 { + n += 1 + sovTypes(uint64(m.Index)) + } + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Sender) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *Response) Size() (n int) { if m == nil { return 0 @@ -9548,6 +11768,54 @@ func (m *Response_Commit) Size() (n int) { } return n } +func (m *Response_ListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ListSnapshots != nil { + l = m.ListSnapshots.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_OfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.OfferSnapshot != nil { + l = m.OfferSnapshot.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_LoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.LoadSnapshotChunk != nil { + l = m.LoadSnapshotChunk.Size() + n += 1 + l + sovTypes(uint64(l)) + } + return n +} +func (m *Response_ApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ApplySnapshotChunk != nil { + l = m.ApplySnapshotChunk.Size() + n += 2 + l + sovTypes(uint64(l)) + } + return n +} func (m *ResponseException) Size() (n int) { if m == nil { return 0 @@ -9863,6 +12131,83 @@ func (m *ResponseCommit) Size() (n int) { return n } +func (m *ResponseListSnapshots) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Snapshots) > 0 { + for _, e := range m.Snapshots { + l = e.Size() + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseOfferSnapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *ResponseLoadSnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Chunk) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m 
*ResponseApplySnapshotChunk) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Result != 0 { + n += 1 + sovTypes(uint64(m.Result)) + } + if len(m.RefetchChunks) > 0 { + l = 0 + for _, e := range m.RefetchChunks { + l += sovTypes(uint64(e)) + } + n += 1 + sovTypes(uint64(l)) + l + } + if len(m.RejectSenders) > 0 { + for _, s := range m.RejectSenders { + l = len(s) + n += 1 + l + sovTypes(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *ConsensusParams) Size() (n int) { if m == nil { return 0 @@ -10221,6 +12566,35 @@ func (m *Evidence) Size() (n int) { return n } +func (m *Snapshot) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Height != 0 { + n += 1 + sovTypes(uint64(m.Height)) + } + if m.Format != 0 { + n += 1 + sovTypes(uint64(m.Format)) + } + if m.Chunks != 0 { + n += 1 + sovTypes(uint64(m.Chunks)) + } + l = len(m.Hash) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + l = len(m.Metadata) + if l > 0 { + n += 1 + l + sovTypes(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovTypes(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -10606,6 +12980,146 @@ func (m *Request) Unmarshal(dAtA []byte) error { } m.Value = &Request_Commit{v} iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ListSnapshots{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_OfferSnapshot{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_LoadSnapshotChunk{v} + 
iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &RequestApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Request_ApplySnapshotChunk{v} + iNdEx = postIndex case 19: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field DeliverTx", wireType) @@ -11940,6 +14454,434 @@ func (m *RequestCommit) Unmarshal(dAtA []byte) error { } return nil } +func (m *RequestListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
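+ // Annotation: unknown fields are buffered into XXX_unrecognized and re-emitted
+ // verbatim by MarshalToSizedBuffer, so messages carrying fields from a newer
+ // protocol revision round-trip through this node without data loss.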
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Snapshot == nil { + m.Snapshot = &Snapshot{} + } + if err := m.Snapshot.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AppHash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AppHash = append(m.AppHash[:0], dAtA[iNdEx:postIndex]...) + if m.AppHash == nil { + m.AppHash = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
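+ // Annotation: skipTypes derives the encoded length of the unknown field from
+ // its wire type so that decoding can resume cleanly at the next field tag.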
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestLoadSnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + m.Chunk = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunk |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
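+ // Annotation: each varint loop accumulates 7 payload bits per byte until it
+ // sees a byte with the high bit clear; the shift >= 64 guard rejects encodings
+ // longer than the ten bytes a uint64 can occupy (ErrIntOverflowTypes).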
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *RequestApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: RequestApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Index", wireType) + } + m.Index = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Index |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Sender", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Sender = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
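+ // Annotation: wireType 4 (end-group) without a matching start-group and field
+ // numbers <= 0 are both malformed protobuf, so they are rejected before the
+ // field switch dispatches.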
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *Response) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -12389,6 +15331,146 @@ func (m *Response) Unmarshal(dAtA []byte) error { } m.Value = &Response_Commit{v} iNdEx = postIndex + case 13: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListSnapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseListSnapshots{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ListSnapshots{v} + iNdEx = postIndex + case 14: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OfferSnapshot", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseOfferSnapshot{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_OfferSnapshot{v} + iNdEx = postIndex + case 15: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LoadSnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseLoadSnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_LoadSnapshotChunk{v} + iNdEx = postIndex + case 16: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ApplySnapshotChunk", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ResponseApplySnapshotChunk{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Value = &Response_ApplySnapshotChunk{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) @@ -14305,6 +17387,436 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { } return nil } +func (m *ResponseListSnapshots) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseListSnapshots: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseListSnapshots: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Snapshots", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Snapshots = append(m.Snapshots, &Snapshot{}) + if err := m.Snapshots[len(m.Snapshots)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseOfferSnapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseOfferSnapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseOfferSnapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseOfferSnapshot_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
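+ // Annotation: Result is decoded as a bare varint cast to
+ // ResponseOfferSnapshot_Result; under proto3 open-enum semantics, out-of-range
+ // values are preserved here and left for the caller to validate.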
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseLoadSnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseLoadSnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunk", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Chunk = append(m.Chunk[:0], dAtA[iNdEx:postIndex]...) + if m.Chunk == nil { + m.Chunk = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ResponseApplySnapshotChunk) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResponseApplySnapshotChunk: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Result", wireType) + } + m.Result = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Result |= ResponseApplySnapshotChunk_Result(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType == 0 { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + packedLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + var elementCount int + var count int + for _, integer := range dAtA[iNdEx:postIndex] { + if integer < 128 { + count++ + } + } + elementCount = count + if elementCount != 0 && len(m.RefetchChunks) == 0 { + m.RefetchChunks = make([]uint32, 0, elementCount) + } + for iNdEx < postIndex { + var v uint32 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.RefetchChunks = append(m.RefetchChunks, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field RefetchChunks", wireType) + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RejectSenders", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RejectSenders = append(m.RejectSenders, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + 
return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ConsensusParams) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -16580,6 +20092,185 @@ func (m *Evidence) Unmarshal(dAtA []byte) error { } return nil } +func (m *Snapshot) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Snapshot: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Snapshot: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Height", wireType) + } + m.Height = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Height |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType) + } + m.Format = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Format |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Chunks", wireType) + } + m.Chunks = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Chunks |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hash", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hash = append(m.Hash[:0], dAtA[iNdEx:postIndex]...) 
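+			// Keep "present but empty" distinct from "absent": an empty hash
+			// field still yields a non-nil, zero-length slice.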
+ if m.Hash == nil { + m.Hash = []byte{} + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthTypes + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthTypes + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Metadata = append(m.Metadata[:0], dAtA[iNdEx:postIndex]...) + if m.Metadata == nil { + m.Metadata = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTypes(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) < 0 { + return ErrInvalidLengthTypes + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipTypes(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/abci/types/types.proto b/abci/types/types.proto index 2a9cbc4d6..5683acb29 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -27,17 +27,21 @@ option (gogoproto.testgen_all) = true; message Request { oneof value { - RequestEcho echo = 2; - RequestFlush flush = 3; - RequestInfo info = 4; - RequestSetOption set_option = 5; - RequestInitChain init_chain = 6; - RequestQuery query = 7; - RequestBeginBlock begin_block = 8; - RequestCheckTx check_tx = 9; - RequestDeliverTx deliver_tx = 19; - RequestEndBlock end_block = 11; - RequestCommit commit = 12; + RequestEcho echo = 2; + RequestFlush flush = 3; + RequestInfo info = 4; + RequestSetOption set_option = 5; + RequestInitChain init_chain = 6; + RequestQuery query = 7; + RequestBeginBlock begin_block = 8; + RequestCheckTx check_tx = 9; + RequestDeliverTx deliver_tx = 19; + RequestEndBlock end_block = 11; + RequestCommit commit = 12; + RequestListSnapshots list_snapshots = 13; + RequestOfferSnapshot offer_snapshot = 14; + RequestLoadSnapshotChunk load_snapshot_chunk = 15; + RequestApplySnapshotChunk apply_snapshot_chunk = 16; } } @@ -101,23 +105,51 @@ message RequestEndBlock { message RequestCommit {} +// lists available snapshots +message RequestListSnapshots { +} + +// offers a snapshot to the application +message RequestOfferSnapshot { + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height +} + +// loads a snapshot chunk +message RequestLoadSnapshotChunk { + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; +} + +// Applies a snapshot chunk +message RequestApplySnapshotChunk { + uint32 index = 1; + bytes chunk = 2; + string sender = 3; +} + //---------------------------------------- // Response types message Response { oneof value { - ResponseException exception = 1; - ResponseEcho echo = 2; - ResponseFlush flush = 3; - ResponseInfo info = 4; - ResponseSetOption set_option = 5; - ResponseInitChain init_chain = 6; - ResponseQuery query = 7; - ResponseBeginBlock begin_block = 8; - ResponseCheckTx check_tx = 9; - ResponseDeliverTx deliver_tx = 10; - ResponseEndBlock end_block = 11; - ResponseCommit commit = 12; + ResponseException exception = 1; + ResponseEcho 
echo = 2; + ResponseFlush flush = 3; + ResponseInfo info = 4; + ResponseSetOption set_option = 5; + ResponseInitChain init_chain = 6; + ResponseQuery query = 7; + ResponseBeginBlock begin_block = 8; + ResponseCheckTx check_tx = 9; + ResponseDeliverTx deliver_tx = 10; + ResponseEndBlock end_block = 11; + ResponseCommit commit = 12; + ResponseListSnapshots list_snapshots = 13; + ResponseOfferSnapshot offer_snapshot = 14; + ResponseLoadSnapshotChunk load_snapshot_chunk = 15; + ResponseApplySnapshotChunk apply_snapshot_chunk = 16; } } @@ -210,6 +242,40 @@ message ResponseCommit { int64 retain_height = 3; } +message ResponseListSnapshots { + repeated Snapshot snapshots = 1; +} + +message ResponseOfferSnapshot { + Result result = 1; + + enum Result { + accept = 0; // Snapshot accepted, apply chunks + abort = 1; // Abort all snapshot restoration + reject = 2; // Reject this specific snapshot, and try a different one + reject_format = 3; // Reject all snapshots of this format, and try a different one + reject_sender = 4; // Reject all snapshots from the sender(s), and try a different one + } +} + +message ResponseLoadSnapshotChunk { + bytes chunk = 1; +} + +message ResponseApplySnapshotChunk { + Result result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply (regardless of result) + repeated string reject_senders = 3; // Chunk senders to reject and ban (regardless of result) + + enum Result { + accept = 0; // Chunk successfully accepted + abort = 1; // Abort all snapshot restoration + retry = 2; // Retry chunk, combine with refetch and reject as appropriate + retry_snapshot = 3; // Retry snapshot, combine with refetch and reject as appropriate + reject_snapshot = 4; // Reject this snapshot, try a different one but keep sender rejections + } +} + //---------------------------------------- // Misc. 
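The two Result enums above drive the whole restoration flow: OfferSnapshot decides whether a snapshot is worth restoring at all, while ApplySnapshotChunk reports per-chunk progress and can ask Tendermint to refetch chunks or ban peers. Below is a minimal sketch of an application wired to these types; the SnapshotApp struct and its chunkValid/applyChunk helpers are hypothetical, and the enum constant names assume gogoproto's usual Message_value naming for the generated Go code (consistent with the ResponseOfferSnapshot_Result and ResponseApplySnapshotChunk_Result casts in the Unmarshal code earlier in this diff).

package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// SnapshotApp is a hypothetical application restoring state from a snapshot.
type SnapshotApp struct {
	abci.BaseApplication
}

func (app *SnapshotApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	// This sketch only understands format 1; rejecting the format (rather
	// than aborting) lets Tendermint offer a snapshot in another format.
	if req.Snapshot == nil || req.Snapshot.Format != 1 {
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject_format}
	}
	return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_accept}
}

func (app *SnapshotApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	if !app.chunkValid(req.Index, req.Chunk) {
		// Bad chunk: refetch it from another peer, ban whoever sent it, and
		// retry. RefetchChunks and RejectSenders apply regardless of Result.
		return abci.ResponseApplySnapshotChunk{
			Result:        abci.ResponseApplySnapshotChunk_retry,
			RefetchChunks: []uint32{req.Index},
			RejectSenders: []string{req.Sender},
		}
	}
	app.applyChunk(req.Chunk)
	return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}
}

// chunkValid and applyChunk stand in for real verification and persistence.
func (app *SnapshotApp) chunkValid(index uint32, chunk []byte) bool { return len(chunk) > 0 }

func (app *SnapshotApp) applyChunk(chunk []byte) {}

The remaining codes escalate in scope: retry_snapshot restarts the current snapshot, reject_snapshot moves on to a different one while keeping any sender bans, and abort gives up on snapshot restoration entirely.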
@@ -336,6 +402,17 @@ message Evidence { int64 total_voting_power = 5; } +//---------------------------------------- +// State Sync Types + +message Snapshot { + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash - should be equal only for identical snapshots + bytes metadata = 5; // Arbitrary application metadata +} + //---------------------------------------- // Service Definition @@ -351,4 +428,8 @@ service ABCIApplication { rpc InitChain(RequestInitChain) returns (ResponseInitChain); rpc BeginBlock(RequestBeginBlock) returns (ResponseBeginBlock); rpc EndBlock(RequestEndBlock) returns (ResponseEndBlock); + rpc ListSnapshots(RequestListSnapshots) returns (ResponseListSnapshots); + rpc OfferSnapshot(RequestOfferSnapshot) returns (ResponseOfferSnapshot); + rpc LoadSnapshotChunk(RequestLoadSnapshotChunk) returns (ResponseLoadSnapshotChunk); + rpc ApplySnapshotChunk(RequestApplySnapshotChunk) returns (ResponseApplySnapshotChunk); } diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 7a79ad8a9..fcb8d025f 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -697,6 +697,230 @@ func TestRequestCommitMarshalTo(t *testing.T) { } } +func TestRequestListSnapshotsProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestListSnapshots{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestListSnapshotsMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestListSnapshots{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestOfferSnapshotProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, 
len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestOfferSnapshotMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestLoadSnapshotChunkProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestLoadSnapshotChunkMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestApplySnapshotChunkProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto 
%#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestRequestApplySnapshotChunkMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -1425,6 +1649,230 @@ func TestResponseCommitMarshalTo(t *testing.T) { } } +func TestResponseListSnapshotsProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseListSnapshots{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseListSnapshotsMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseListSnapshots{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseOfferSnapshotProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + 
t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseOfferSnapshotMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseLoadSnapshotChunkProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseLoadSnapshotChunk(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseLoadSnapshotChunkMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseLoadSnapshotChunk(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseApplySnapshotChunkProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + 
littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestResponseApplySnapshotChunkMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestConsensusParamsProto(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2321,6 +2769,62 @@ func TestEvidenceMarshalTo(t *testing.T) { } } +func TestSnapshotProto(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, false) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Snapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + littlefuzz := make([]byte, len(dAtA)) + copy(littlefuzz, dAtA) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } + if len(littlefuzz) > 0 { + fuzzamount := 100 + for i := 0; i < fuzzamount; i++ { + littlefuzz[popr.Intn(len(littlefuzz))] = byte(popr.Intn(256)) + littlefuzz = append(littlefuzz, byte(popr.Intn(256))) + } + // shouldn't panic + _ = github_com_gogo_protobuf_proto.Unmarshal(littlefuzz, msg) + } +} + +func TestSnapshotMarshalTo(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, false) + size := p.Size() + dAtA := make([]byte, size) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + _, err := p.MarshalTo(dAtA) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Snapshot{} + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestRequestJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2537,6 +3041,78 @@ func TestRequestCommitJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestRequestListSnapshotsJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestListSnapshots{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = 
%d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestOfferSnapshotJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestOfferSnapshot{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestLoadSnapshotChunkJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestLoadSnapshotChunk{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestRequestApplySnapshotChunkJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &RequestApplySnapshotChunk{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestResponseJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -2771,6 +3347,78 @@ func TestResponseCommitJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestResponseListSnapshotsJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseListSnapshots{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseOfferSnapshotJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseOfferSnapshot{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseLoadSnapshotChunkJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) 
+ p := NewPopulatedResponseLoadSnapshotChunk(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseLoadSnapshotChunk{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} +func TestResponseApplySnapshotChunkJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &ResponseApplySnapshotChunk{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestConsensusParamsJSON(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3059,6 +3707,24 @@ func TestEvidenceJSON(t *testing.T) { t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) } } +func TestSnapshotJSON(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, true) + marshaler := github_com_gogo_protobuf_jsonpb.Marshaler{} + jsondata, err := marshaler.MarshalToString(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + msg := &Snapshot{} + err = github_com_gogo_protobuf_jsonpb.UnmarshalString(jsondata, msg) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Json Equal %#v", seed, msg, p) + } +} func TestRequestProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3395,6 +4061,118 @@ func TestRequestCommitProtoCompactText(t *testing.T) { } } +func TestRequestListSnapshotsProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &RequestListSnapshots{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestListSnapshotsProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &RequestListSnapshots{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestOfferSnapshotProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &RequestOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + 
t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestOfferSnapshotProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &RequestOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestLoadSnapshotChunkProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &RequestLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestLoadSnapshotChunkProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &RequestLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestApplySnapshotChunkProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &RequestApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestRequestApplySnapshotChunkProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &RequestApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestResponseProtoText(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -3759,6 +4537,118 @@ func TestResponseCommitProtoCompactText(t *testing.T) { } } +func TestResponseListSnapshotsProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseListSnapshots{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseListSnapshotsProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := 
math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseListSnapshots{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseOfferSnapshotProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseOfferSnapshotProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseOfferSnapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseLoadSnapshotChunkProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseLoadSnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseLoadSnapshotChunkProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseLoadSnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseLoadSnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseApplySnapshotChunkProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &ResponseApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestResponseApplySnapshotChunkProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &ResponseApplySnapshotChunk{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestConsensusParamsProtoText(t *testing.T) { 
seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4207,6 +5097,34 @@ func TestEvidenceProtoCompactText(t *testing.T) { } } +func TestSnapshotProtoText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.MarshalTextString(p) + msg := &Snapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + +func TestSnapshotProtoCompactText(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, true) + dAtA := github_com_gogo_protobuf_proto.CompactTextString(p) + msg := &Snapshot{} + if err := github_com_gogo_protobuf_proto.UnmarshalText(dAtA, msg); err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + if !p.Equal(msg) { + t.Fatalf("seed = %d, %#v !Proto %#v", seed, msg, p) + } +} + func TestRequestSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4471,6 +5389,94 @@ func TestRequestCommitSize(t *testing.T) { } } +func TestRequestListSnapshotsSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestListSnapshots(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestRequestOfferSnapshotSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestOfferSnapshot(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestRequestLoadSnapshotChunkSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestLoadSnapshotChunk(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + 
} +} + +func TestRequestApplySnapshotChunkSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedRequestApplySnapshotChunk(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestResponseSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -4757,6 +5763,94 @@ func TestResponseCommitSize(t *testing.T) { } } +func TestResponseListSnapshotsSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseListSnapshots(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestResponseOfferSnapshotSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseOfferSnapshot(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestResponseLoadSnapshotChunkSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseLoadSnapshotChunk(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + +func TestResponseApplySnapshotChunkSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedResponseApplySnapshotChunk(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := 
github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + func TestConsensusParamsSize(t *testing.T) { seed := time.Now().UnixNano() popr := math_rand.New(math_rand.NewSource(seed)) @@ -5109,4 +6203,26 @@ func TestEvidenceSize(t *testing.T) { } } +func TestSnapshotSize(t *testing.T) { + seed := time.Now().UnixNano() + popr := math_rand.New(math_rand.NewSource(seed)) + p := NewPopulatedSnapshot(popr, true) + size2 := github_com_gogo_protobuf_proto.Size(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) + if err != nil { + t.Fatalf("seed = %d, err = %v", seed, err) + } + size := p.Size() + if len(dAtA) != size { + t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA)) + } + if size2 != size { + t.Errorf("seed = %d, size %v != before marshal proto.Size %v", seed, size, size2) + } + size3 := github_com_gogo_protobuf_proto.Size(p) + if size3 != size { + t.Errorf("seed = %d, size %v != after marshal proto.Size %v", seed, size, size3) + } +} + //These tests are generated by github.com/gogo/protobuf/plugin/testgen diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index c82457bee..1adde6bd5 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -41,7 +41,7 @@ const ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(sm.State, uint64) + SwitchToConsensus(state sm.State, skipWAL bool) } type peerError struct { @@ -115,11 +115,25 @@ func (bcR *BlockchainReactor) OnStart() error { if err != nil { return err } - go bcR.poolRoutine() + go bcR.poolRoutine(false) } return nil } +// SwitchToFastSync is called by the state sync reactor when switching to fast sync. +func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error { + bcR.fastSync = true + bcR.initialState = state + + bcR.pool.height = state.LastBlockHeight + 1 + err := bcR.pool.Start() + if err != nil { + return err + } + go bcR.poolRoutine(true) + return nil +} + // OnStop implements service.Service. func (bcR *BlockchainReactor) OnStop() { bcR.pool.Stop() @@ -213,7 +227,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // Handle messages from the poolReactor telling the reactor what to do. // NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! 
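+// stateSynced reports whether this node was bootstrapped from a state sync
+// snapshot; if so, the eventual SwitchToConsensus call must skip WAL replay
+// even when zero blocks were fast-synced.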
-func (bcR *BlockchainReactor) poolRoutine() { +func (bcR *BlockchainReactor) poolRoutine(stateSynced bool) { trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) @@ -273,7 +287,7 @@ FOR_LOOP: bcR.pool.Stop() conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(state, blocksSynced) + conR.SwitchToConsensus(state, blocksSynced > 0 || stateSynced) } // else { // should only happen during testing diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index 315dbffbf..970825d14 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -44,7 +44,7 @@ var ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(sm.State, uint64) + SwitchToConsensus(state sm.State, skipWAL bool) } // BlockchainReactor handles long-term catchup syncing. @@ -57,7 +57,8 @@ type BlockchainReactor struct { blockExec *sm.BlockExecutor store *store.BlockStore - fastSync bool + fastSync bool + stateSynced bool fsm *BcReactorFSM blocksSynced uint64 @@ -154,6 +155,19 @@ func (bcR *BlockchainReactor) OnStop() { _ = bcR.Stop() } +// SwitchToFastSync is called by the state sync reactor when switching to fast sync. +func (bcR *BlockchainReactor) SwitchToFastSync(state sm.State) error { + bcR.fastSync = true + bcR.initialState = state + bcR.state = state + bcR.stateSynced = true + + bcR.fsm = NewFSM(state.LastBlockHeight+1, bcR) + bcR.fsm.SetLogger(bcR.Logger) + go bcR.poolRoutine() + return nil +} + // GetChannels implements Reactor func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ @@ -474,7 +488,7 @@ func (bcR *BlockchainReactor) sendBlockRequest(peerID p2p.ID, height int64) erro func (bcR *BlockchainReactor) switchToConsensus() { conR, ok := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(bcR.state, bcR.blocksSynced) + conR.SwitchToConsensus(bcR.state, bcR.blocksSynced > 0 || bcR.stateSynced) bcR.eventsFromFSMCh <- bcFsmMessage{event: syncFinishedEv} } // else { diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 762e72db9..a82b2211c 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -167,7 +167,7 @@ type consensusReactorTest struct { mtx sync.Mutex } -func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced uint64) { +func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced bool) { conR.mtx.Lock() defer conR.mtx.Unlock() conR.switchedToConsensus = true diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index 32cf3aeaf..29850d3e5 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -16,7 +16,7 @@ type iIO interface { broadcastStatusRequest(base int64, height int64) - trySwitchToConsensus(state state.State, blocksSynced int) + trySwitchToConsensus(state state.State, skipWAL bool) } type switchIO struct { @@ -37,7 +37,7 @@ const ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(state.State, int) + SwitchToConsensus(state state.State, skipWAL bool) } func (sio *switchIO) sendBlockRequest(peerID p2p.ID, height int64) error { @@ -97,10 +97,10 @@ func (sio *switchIO) sendBlockNotFound(height int64, peerID p2p.ID) error { return nil } -func (sio *switchIO) trySwitchToConsensus(state 
state.State, blocksSynced int) { +func (sio *switchIO) trySwitchToConsensus(state state.State, skipWAL bool) { conR, ok := sio.sw.Reactor("CONSENSUS").(consensusReactor) if ok { - conR.SwitchToConsensus(state, blocksSynced) + conR.SwitchToConsensus(state, skipWAL) } } diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index d68b27a41..60c73e6a8 100644 --- a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -129,8 +129,8 @@ type blockStore interface { type BlockchainReactor struct { p2p.BaseReactor + fastSync bool // if true, enable fast sync on start events chan Event // XXX: Rename eventsFromPeers - stopDemux chan struct{} scheduler *Routine processor *Routine logger log.Logger @@ -157,7 +157,7 @@ type blockApplier interface { // XXX: unify naming in this package around tmState // XXX: V1 stores a copy of state as initialState, which is never mutated. Is that nessesary? func newReactor(state state.State, store blockStore, reporter behaviour.Reporter, - blockApplier blockApplier, bufferSize int) *BlockchainReactor { + blockApplier blockApplier, bufferSize int, fastSync bool) *BlockchainReactor { scheduler := newScheduler(state.LastBlockHeight, time.Now()) pContext := newProcessorContext(store, blockApplier, state) // TODO: Fix naming to just newProcesssor @@ -166,12 +166,12 @@ func newReactor(state state.State, store blockStore, reporter behaviour.Reporter return &BlockchainReactor{ events: make(chan Event, bufferSize), - stopDemux: make(chan struct{}), scheduler: newRoutine("scheduler", scheduler.handle, bufferSize), processor: newRoutine("processor", processor.handle, bufferSize), store: store, reporter: reporter, logger: log.NewNopLogger(), + fastSync: fastSync, } } @@ -182,7 +182,7 @@ func NewBlockchainReactor( store blockStore, fastSync bool) *BlockchainReactor { reporter := behaviour.NewMockReporter() - return newReactor(state, store, reporter, blockApplier, 1000) + return newReactor(state, store, reporter, blockApplier, 1000, fastSync) } // SetSwitch implements Reactor interface. @@ -226,9 +226,11 @@ func (r *BlockchainReactor) SetLogger(logger log.Logger) { // Start implements cmn.Service interface func (r *BlockchainReactor) Start() error { r.reporter = behaviour.NewSwitchReporter(r.BaseReactor.Switch) - go r.scheduler.start() - go r.processor.start() - go r.demux() + if r.fastSync { + go r.scheduler.start() + go r.processor.start() + go r.demux() + } return nil } @@ -306,19 +308,29 @@ func (r *BlockchainReactor) demux() { processBlockFreq = 20 * time.Millisecond doProcessBlockCh = make(chan struct{}, 1) doProcessBlockTk = time.NewTicker(processBlockFreq) + ) + defer doProcessBlockTk.Stop() + var ( prunePeerFreq = 1 * time.Second doPrunePeerCh = make(chan struct{}, 1) doPrunePeerTk = time.NewTicker(prunePeerFreq) + ) + defer doPrunePeerTk.Stop() + var ( scheduleFreq = 20 * time.Millisecond doScheduleCh = make(chan struct{}, 1) doScheduleTk = time.NewTicker(scheduleFreq) + ) + defer doScheduleTk.Stop() + var ( statusFreq = 10 * time.Second doStatusCh = make(chan struct{}, 1) doStatusTk = time.NewTicker(statusFreq) ) + defer doStatusTk.Stop() // XXX: Extract timers to make testing atemporal for { @@ -355,14 +367,20 @@ func (r *BlockchainReactor) demux() { case <-doStatusCh: r.io.broadcastStatusRequest(r.store.Base(), r.SyncHeight()) - // Events from peers - case event := <-r.events: + // Events from peers. Closing the channel signals event loop termination. 
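As an aside before the event handling continues, the ticker refactor above applies a standard Go pattern: each `time.Ticker` now gets a matching deferred `Stop`, so the tickers are released when `demux` returns instead of leaking. The pattern in isolation, independent of this PR:

```go
package main

import (
	"fmt"
	"time"
)

// loop pairs its ticker with a deferred Stop, exactly as demux does above,
// so the ticker's resources are released when the event loop returns.
func loop(done <-chan struct{}) {
	tick := time.NewTicker(20 * time.Millisecond)
	defer tick.Stop() // without this, the ticker is never freed

	for {
		select {
		case <-tick.C:
			fmt.Println("periodic trigger")
		case <-done:
			return // the deferred Stop runs here
		}
	}
}

func main() {
	done := make(chan struct{})
	go loop(done)
	time.Sleep(50 * time.Millisecond)
	close(done)
}
```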
+ case event, ok := <-r.events: + if !ok { + r.logger.Info("Stopping event processing") + return + } switch event := event.(type) { case bcStatusResponse: r.setMaxPeerHeight(event.height) r.scheduler.send(event) case bcAddNewPeer, bcRemovePeer, bcBlockResponse, bcNoBlockResponse: r.scheduler.send(event) + default: + r.logger.Error("Received unknown event", "event", fmt.Sprintf("%T", event)) } // Incremental events form scheduler @@ -378,6 +396,9 @@ func (r *BlockchainReactor) demux() { case scFinishedEv: r.processor.send(event) r.scheduler.stop() + case noOpEvent: + default: + r.logger.Error("Received unknown scheduler event", "event", fmt.Sprintf("%T", event)) } // Incremental events from processor @@ -395,22 +416,30 @@ func (r *BlockchainReactor) demux() { case pcBlockVerificationFailure: r.scheduler.send(event) case pcFinished: - r.io.trySwitchToConsensus(event.tmState, event.blocksSynced) + r.io.trySwitchToConsensus(event.tmState, event.blocksSynced > 0) r.processor.stop() + case noOpEvent: + default: + r.logger.Error("Received unknown processor event", "event", fmt.Sprintf("%T", event)) } - // Terminal events from scheduler + // Terminal event from scheduler case err := <-r.scheduler.final(): - r.logger.Info(fmt.Sprintf("scheduler final %s", err)) - // send the processor stop? + switch err { + case nil: + r.logger.Info("Scheduler stopped") + default: + r.logger.Error("Scheduler aborted with error", "err", err) + } // Terminal event from processor - case event := <-r.processor.final(): - r.logger.Info(fmt.Sprintf("processor final %s", event)) - - case <-r.stopDemux: - r.logger.Info("demuxing stopped") - return + case err := <-r.processor.final(): + switch err { + case nil: + r.logger.Info("Processor stopped") + default: + r.logger.Error("Processor aborted with error", "err", err) + } } } } @@ -421,7 +450,6 @@ func (r *BlockchainReactor) Stop() error { r.scheduler.stop() r.processor.stop() - close(r.stopDemux) close(r.events) r.logger.Info("reactor stopped") diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 4b3013a2d..aaa5c979d 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -118,7 +118,7 @@ func (sio *mockSwitchIo) sendBlockNotFound(height int64, peerID p2p.ID) error { return nil } -func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, blocksSynced int) { +func (sio *mockSwitchIo) trySwitchToConsensus(state sm.State, skipWAL bool) { sio.mtx.Lock() defer sio.mtx.Unlock() sio.switchedToConsensus = true @@ -157,7 +157,7 @@ func newTestReactor(p testReactorParams) *BlockchainReactor { sm.SaveState(db, state) } - r := newReactor(state, store, reporter, appl, p.bufferSize) + r := newReactor(state, store, reporter, appl, p.bufferSize, true) logger := log.TestingLogger() r.SetLogger(logger.With("module", "blockchain")) diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index ff12bfebc..ad32e3e82 100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -69,9 +69,11 @@ func (rt *Routine) start() { for { events, err := rt.queue.Get(1) - if err != nil { - rt.logger.Info(fmt.Sprintf("%s: stopping\n", rt.name)) - rt.terminate(fmt.Errorf("stopped")) + if err == queue.ErrDisposed { + rt.terminate(nil) + return + } else if err != nil { + rt.terminate(err) return } oEvent, err := rt.handle(events[0].(Event)) @@ -131,6 +133,7 @@ func (rt *Routine) final() chan error { // XXX: Maybe get rid of this func (rt *Routine) terminate(reason error) { - close(rt.out) + // We don't close the rt.out channel here, to 
avoid spinning on the closed channel + // in the event loop. rt.fin <- reason } diff --git a/config/config.go b/config/config.go index 173713279..49ccf186a 100644 --- a/config/config.go +++ b/config/config.go @@ -1,13 +1,13 @@ package config import ( + "encoding/hex" + "errors" "fmt" "net/http" "os" "path/filepath" "time" - - "github.com/pkg/errors" ) const ( @@ -65,6 +65,7 @@ type Config struct { RPC *RPCConfig `mapstructure:"rpc"` P2P *P2PConfig `mapstructure:"p2p"` Mempool *MempoolConfig `mapstructure:"mempool"` + StateSync *StateSyncConfig `mapstructure:"statesync"` FastSync *FastSyncConfig `mapstructure:"fastsync"` Consensus *ConsensusConfig `mapstructure:"consensus"` TxIndex *TxIndexConfig `mapstructure:"tx_index"` @@ -78,6 +79,7 @@ func DefaultConfig() *Config { RPC: DefaultRPCConfig(), P2P: DefaultP2PConfig(), Mempool: DefaultMempoolConfig(), + StateSync: DefaultStateSyncConfig(), FastSync: DefaultFastSyncConfig(), Consensus: DefaultConsensusConfig(), TxIndex: DefaultTxIndexConfig(), @@ -92,6 +94,7 @@ func TestConfig() *Config { RPC: TestRPCConfig(), P2P: TestP2PConfig(), Mempool: TestMempoolConfig(), + StateSync: TestStateSyncConfig(), FastSync: TestFastSyncConfig(), Consensus: TestConsensusConfig(), TxIndex: TestTxIndexConfig(), @@ -116,24 +119,27 @@ func (cfg *Config) ValidateBasic() error { return err } if err := cfg.RPC.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [rpc] section") + return fmt.Errorf("error in [rpc] section: %w", err) } if err := cfg.P2P.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [p2p] section") + return fmt.Errorf("error in [p2p] section: %w", err) } if err := cfg.Mempool.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [mempool] section") + return fmt.Errorf("error in [mempool] section: %w", err) + } + if err := cfg.StateSync.ValidateBasic(); err != nil { + return fmt.Errorf("error in [statesync] section: %w", err) } if err := cfg.FastSync.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [fastsync] section") + return fmt.Errorf("error in [fastsync] section: %w", err) } if err := cfg.Consensus.ValidateBasic(); err != nil { - return errors.Wrap(err, "Error in [consensus] section") + return fmt.Errorf("error in [consensus] section: %w", err) } - return errors.Wrap( - cfg.Instrumentation.ValidateBasic(), - "Error in [instrumentation] section", - ) + if err := cfg.Instrumentation.ValidateBasic(); err != nil { + return fmt.Errorf("error in [instrumentation] section: %w", err) + } + return nil } //----------------------------------------------------------------------------- @@ -292,7 +298,7 @@ func DefaultLogLevel() string { // DefaultPackageLogLevels returns a default log level setting so all packages // log at "error", while the `state` and `main` packages log at "info" func DefaultPackageLogLevels() string { - return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel()) + return fmt.Sprintf("main:info,state:info,statesync:info,*:%s", DefaultLogLevel()) } //----------------------------------------------------------------------------- @@ -701,6 +707,69 @@ func (cfg *MempoolConfig) ValidateBasic() error { return nil } +//----------------------------------------------------------------------------- +// StateSyncConfig + +// StateSyncConfig defines the configuration for the Tendermint state sync service +type StateSyncConfig struct { + Enable bool `mapstructure:"enable"` + TempDir string `mapstructure:"temp_dir"` + RPCServers []string `mapstructure:"rpc_servers"` + TrustPeriod 
time.Duration `mapstructure:"trust_period"` + TrustHeight int64 `mapstructure:"trust_height"` + TrustHash string `mapstructure:"trust_hash"` +} + +func (cfg *StateSyncConfig) TrustHashBytes() []byte { + // validated in ValidateBasic, so we can safely panic here + bytes, err := hex.DecodeString(cfg.TrustHash) + if err != nil { + panic(err) + } + return bytes +} + +// DefaultStateSyncConfig returns a default configuration for the state sync service +func DefaultStateSyncConfig() *StateSyncConfig { + return &StateSyncConfig{} +} + +// TestFastSyncConfig returns a default configuration for the state sync service +func TestStateSyncConfig() *StateSyncConfig { + return DefaultStateSyncConfig() +} + +// ValidateBasic performs basic validation. +func (cfg *StateSyncConfig) ValidateBasic() error { + if cfg.Enable { + if len(cfg.RPCServers) == 0 { + return errors.New("rpc_servers is required") + } + if len(cfg.RPCServers) < 2 { + return errors.New("at least two rpc_servers entries is required") + } + for _, server := range cfg.RPCServers { + if len(server) == 0 { + return errors.New("found empty rpc_servers entry") + } + } + if cfg.TrustPeriod <= 0 { + return errors.New("trusted_period is required") + } + if cfg.TrustHeight <= 0 { + return errors.New("trusted_height is required") + } + if len(cfg.TrustHash) == 0 { + return errors.New("trusted_hash is required") + } + _, err := hex.DecodeString(cfg.TrustHash) + if err != nil { + return fmt.Errorf("invalid trusted_hash: %w", err) + } + } + return nil +} + //----------------------------------------------------------------------------- // FastSyncConfig diff --git a/config/config_test.go b/config/config_test.go index c83f1c3f5..4cd161125 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestDefaultConfig(t *testing.T) { @@ -122,6 +123,11 @@ func TestMempoolConfigValidateBasic(t *testing.T) { } } +func TestStateSyncConfigValidateBasic(t *testing.T) { + cfg := TestStateSyncConfig() + require.NoError(t, cfg.ValidateBasic()) +} + func TestFastSyncConfigValidateBasic(t *testing.T) { cfg := TestFastSyncConfig() assert.NoError(t, cfg.ValidateBasic()) diff --git a/config/toml.go b/config/toml.go index 3fe4d1aac..5fac85cf8 100644 --- a/config/toml.go +++ b/config/toml.go @@ -315,6 +315,30 @@ cache_size = {{ .Mempool.CacheSize }} # NOTE: the max size of a tx transmitted over the network is {max_tx_bytes} + {amino overhead}. max_tx_bytes = {{ .Mempool.MaxTxBytes }} +##### state sync configuration options ##### +[statesync] +# State sync rapidly bootstraps a new node by discovering, fetching, and restoring a state machine +# snapshot from peers instead of fetching and replaying historical blocks. Requires some peers in +# the network to take and serve state machine snapshots. State sync is not attempted if the node +# has any local state (LastBlockHeight > 0). The node will have a truncated block history, +# starting from the height of the snapshot. +enable = {{ .StateSync.Enable }} + +# RPC servers (comma-separated) for light client verification of the synced state machine and +# retrieval of state data for node bootstrapping. Also needs a trusted height and corresponding +# header hash obtained from a trusted source, and a period during which validators can be trusted. 
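To make the validation rules above concrete before the template comments continue below, here is a sketch that fills in the new `StateSyncConfig` programmatically and runs `ValidateBasic`; the server addresses, trust height, and hex trust hash are placeholders:

```go
package main

import (
	"fmt"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

// Populate the new [statesync] options and run the same checks that
// ValidateBasic enforces above.
func main() {
	c := cfg.DefaultStateSyncConfig()
	c.Enable = true
	c.RPCServers = []string{"http://host1:26657", "http://host2:26657"} // at least two required
	c.TrustHeight = 1000
	c.TrustHash = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855" // placeholder hex-encoded header hash
	c.TrustPeriod = 168 * time.Hour

	if err := c.ValidateBasic(); err != nil {
		fmt.Println("invalid [statesync] config:", err)
		return
	}
	// Safe once validation has passed; TrustHashBytes panics on invalid hex.
	fmt.Printf("trust hash: %X\n", c.TrustHashBytes())
}
```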
+# +# For Cosmos SDK-based chains, trust_period should usually be about 2/3 of the unbonding time (~2 +# weeks) during which they can be financially punished (slashed) for misbehavior. +rpc_servers = "" +trust_height = {{ .StateSync.TrustHeight }} +trust_hash = "{{ .StateSync.TrustHash }}" +trust_period = "{{ .StateSync.TrustPeriod }}" + +# Temporary directory for state sync snapshot chunks, defaults to the OS tempdir (typically /tmp). +# Will create a new, randomly named directory within, and remove it when done. +temp_dir = "{{ .StateSync.TempDir }}" + ##### fast sync configuration options ##### [fastsync] diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 649d47fbb..5b9ec1768 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -112,13 +112,13 @@ func TestByzantine(t *testing.T) { // note these must be started before the byz for i := 1; i < N; i++ { cr := reactors[i].(*Reactor) - cr.SwitchToConsensus(cr.conS.GetState(), 0) + cr.SwitchToConsensus(cr.conS.GetState(), false) } // start the byzantine state machine byzR := reactors[0].(*ByzantineReactor) s := byzR.reactor.conS.GetState() - byzR.reactor.SwitchToConsensus(s, 0) + byzR.reactor.SwitchToConsensus(s, false) // byz proposer sends one block to peers[0] // and the other block to peers[1] and peers[2]. @@ -268,8 +268,8 @@ func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { peer.Set(types.PeerStateKey, peerState) // Send our state to peer. - // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). - if !br.reactor.fastSync { + // If we're syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). + if !br.reactor.waitSync { br.reactor.sendNewRoundStepMessage(peer) } } diff --git a/consensus/metrics.go b/consensus/metrics.go index 5fa27118a..b4c6458dc 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -55,6 +55,8 @@ type Metrics struct { CommittedHeight metrics.Gauge // Whether or not a node is fast syncing. 1 if yes, 0 if no. FastSyncing metrics.Gauge + // Whether or not a node is state syncing. 1 if yes, 0 if no. + StateSyncing metrics.Gauge // Number of blockparts transmitted by peer. BlockParts metrics.Counter @@ -174,6 +176,12 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Name: "fast_syncing", Help: "Whether or not a node is fast syncing. 1 if yes, 0 if no.", }, labels).With(labelsAndValues...), + StateSyncing: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: MetricsSubsystem, + Name: "state_syncing", + Help: "Whether or not a node is state syncing. 1 if yes, 0 if no.", + }, labels).With(labelsAndValues...), BlockParts: prometheus.NewCounterFrom(stdprometheus.CounterOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, @@ -208,6 +216,7 @@ func NopMetrics() *Metrics { TotalTxs: discard.NewGauge(), CommittedHeight: discard.NewGauge(), FastSyncing: discard.NewGauge(), + StateSyncing: discard.NewGauge(), BlockParts: discard.NewCounter(), } } diff --git a/consensus/reactor.go b/consensus/reactor.go index 99f40ac0b..eb9dd0871 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -41,23 +41,22 @@ type Reactor struct { conS *State mtx sync.RWMutex - fastSync bool + waitSync bool eventBus *types.EventBus - metrics *Metrics + Metrics *Metrics } type ReactorOption func(*Reactor) // NewReactor returns a new Reactor with the given // consensusState. 
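An aside on the two gauges added above, before the reactor constructor changes below: they track which catch-up phase the node is in, and the startup code in `node.go` later in this diff flips them roughly as follows (schematic sketch; `NopMetrics` stands in for the real Prometheus wiring):

```go
package main

import "github.com/tendermint/tendermint/consensus"

// Schematic gauge lifecycle during a state sync + fast sync startup,
// mirroring the Set calls node.go makes later in this diff.
func main() {
	m := consensus.NopMetrics() // a real node uses PrometheusMetrics

	m.StateSyncing.Set(1) // restoring a snapshot
	// ...snapshot restored, state bootstrapped...
	m.StateSyncing.Set(0)
	m.FastSyncing.Set(1) // replaying recent blocks
	// ...caught up with the network...
	m.FastSyncing.Set(0) // now participating in consensus
}
```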
-func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) *Reactor { +func NewReactor(consensusState *State, waitSync bool, options ...ReactorOption) *Reactor { conR := &Reactor{ conS: consensusState, - fastSync: fastSync, - metrics: NopMetrics(), + waitSync: waitSync, + Metrics: NopMetrics(), } - conR.updateFastSyncingMetric() conR.BaseReactor = *p2p.NewBaseReactor("Consensus", conR) for _, option := range options { @@ -70,14 +69,14 @@ func NewReactor(consensusState *State, fastSync bool, options ...ReactorOption) // OnStart implements BaseService by subscribing to events, which later will be // broadcasted to other peers and starting state if we're not in fast sync. func (conR *Reactor) OnStart() error { - conR.Logger.Info("Reactor ", "fastSync", conR.FastSync()) + conR.Logger.Info("Reactor ", "waitSync", conR.WaitSync()) // start routine that computes peer statistics for evaluating peer quality go conR.peerStatsRoutine() conR.subscribeToBroadcastEvents() - if !conR.FastSync() { + if !conR.WaitSync() { err := conR.conS.Start() if err != nil { return err @@ -92,14 +91,14 @@ func (conR *Reactor) OnStart() error { func (conR *Reactor) OnStop() { conR.unsubscribeFromBroadcastEvents() conR.conS.Stop() - if !conR.FastSync() { + if !conR.WaitSync() { conR.conS.Wait() } } // SwitchToConsensus switches from fast_sync mode to consensus mode. // It resets the state, turns off fast_sync, and starts the consensus state-machine -func (conR *Reactor) SwitchToConsensus(state sm.State, blocksSynced uint64) { +func (conR *Reactor) SwitchToConsensus(state sm.State, skipWAL bool) { conR.Logger.Info("SwitchToConsensus") conR.conS.reconstructLastCommit(state) // NOTE: The line below causes broadcastNewRoundStepRoutine() to @@ -107,12 +106,12 @@ func (conR *Reactor) SwitchToConsensus(state sm.State, blocksSynced uint64) { conR.conS.updateToState(state) conR.mtx.Lock() - conR.fastSync = false + conR.waitSync = false conR.mtx.Unlock() - conR.metrics.FastSyncing.Set(0) + conR.Metrics.FastSyncing.Set(0) + conR.Metrics.StateSyncing.Set(0) - if blocksSynced > 0 { - // dont bother with the WAL if we fast synced + if skipWAL { conR.conS.doWALCatchup = false } err := conR.conS.Start() @@ -187,7 +186,7 @@ func (conR *Reactor) AddPeer(peer p2p.Peer) { // Send our state to peer. // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
- if !conR.FastSync() { + if !conR.WaitSync() { conR.sendNewRoundStepMessage(peer) } } @@ -284,8 +283,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case DataChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -296,15 +295,15 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { ps.ApplyProposalPOLMessage(msg) case *BlockPartMessage: ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index) - conR.metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) + conR.Metrics.BlockParts.With("peer_id", string(src.ID())).Add(1) conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} default: conR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } case VoteChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -325,8 +324,8 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case VoteSetBitsChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) + if conR.WaitSync() { + conR.Logger.Info("Ignoring message received during sync", "msg", msg) return } switch msg := msg.(type) { @@ -366,11 +365,11 @@ func (conR *Reactor) SetEventBus(b *types.EventBus) { conR.conS.SetEventBus(b) } -// FastSync returns whether the consensus reactor is in fast-sync mode. -func (conR *Reactor) FastSync() bool { +// WaitSync returns whether the consensus reactor is waiting for state/fast sync. +func (conR *Reactor) WaitSync() bool { conR.mtx.RLock() defer conR.mtx.RUnlock() - return conR.fastSync + return conR.waitSync } //-------------------------------------- @@ -886,19 +885,9 @@ func (conR *Reactor) StringIndented(indent string) string { return s } -func (conR *Reactor) updateFastSyncingMetric() { - var fastSyncing float64 - if conR.fastSync { - fastSyncing = 1 - } else { - fastSyncing = 0 - } - conR.metrics.FastSyncing.Set(fastSyncing) -} - // ReactorMetrics sets the metrics func ReactorMetrics(metrics *Metrics) ReactorOption { - return func(conR *Reactor) { conR.metrics = metrics } + return func(conR *Reactor) { conR.Metrics = metrics } } //----------------------------------------------------------------------------- diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index f3d88e1e6..3c3eef7ed 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -75,7 +75,7 @@ func startConsensusNet(t *testing.T, css []*State, n int) ( // TODO: is this still true with new pubsub? for i := 0; i < n; i++ { s := reactors[i].conS.GetState() - reactors[i].SwitchToConsensus(s, 0) + reactors[i].SwitchToConsensus(s, false) } return reactors, blocksSubs, eventBuses } diff --git a/docs/architecture/adr-053-state-sync-prototype.md b/docs/architecture/adr-053-state-sync-prototype.md index 2848f9dd4..90169f44c 100644 --- a/docs/architecture/adr-053-state-sync-prototype.md +++ b/docs/architecture/adr-053-state-sync-prototype.md @@ -1,5 +1,7 @@ # ADR 053: State Sync Prototype +State sync is now [merged](https://github.com/tendermint/tendermint/pull/4705). 
Up-to-date ABCI documentation is [available](https://github.com/tendermint/spec/pull/90), refer to it rather than this ADR for details. + This ADR outlines the plan for an initial state sync prototype, and is subject to change as we gain feedback and experience. It builds on discussions and findings in [ADR-042](./adr-042-state-sync.md), see that for background information. ## Changelog @@ -22,6 +24,8 @@ This ADR outlines the plan for an initial state sync prototype, and is subject t * ABCI: chunks are now 0-indexed, for parity with `chunk_hashes` array. * Reduced maximum chunk size to 16 MB, and increased snapshot message size to 4 MB. +* 2020-04-29: Update with final released ABCI interface (Erik Grinaker) + ## Context State sync will allow a new node to receive a snapshot of the application state without downloading blocks or going through consensus. This bootstraps the node significantly faster than the current fast sync system, which replays all historical blocks. @@ -48,10 +52,11 @@ A node can have multiple snapshots taken at various heights. Snapshots can be ta ```proto message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - repeated bytes chunk_hashes = 3; // SHA-256 checksums of all chunks, in order - bytes metadata = 4; // Arbitrary application metadata + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + uint32 chunks = 3; // Number of chunks in the snapshot + bytes hash = 4; // Arbitrary snapshot hash - should be equal only for identical snapshots + bytes metadata = 5; // Arbitrary application metadata } ``` @@ -64,50 +69,57 @@ Chunks are exchanged simply as `bytes`, and cannot be larger than 16 MB. 
`Snapsh message RequestListSnapshots {} message ResponseListSnapshots { - repeated Snapshot snapshots = 1; + repeated Snapshot snapshots = 1; } // Offers a snapshot to the application message RequestOfferSnapshot { - Snapshot snapshot = 1; - bytes app_hash = 2; -} + Snapshot snapshot = 1; // snapshot offered by peers + bytes app_hash = 2; // light client-verified app hash for snapshot height + } message ResponseOfferSnapshot { - bool accepted = 1; - Reason reason = 2; + Result result = 1; - enum Reason { // Reason why snapshot was rejected - unknown = 0; // Unknown or generic reason - invalid_height = 1; // Height is rejected: avoid this height - invalid_format = 2; // Format is rejected: avoid this format - } + enum Result { + accept = 0; // Snapshot accepted, apply chunks + abort = 1; // Abort all snapshot restoration + reject = 2; // Reject this specific snapshot, and try a different one + reject_format = 3; // Reject all snapshots of this format, and try a different one + reject_sender = 4; // Reject all snapshots from the sender(s), and try a different one + } } // Loads a snapshot chunk message RequestLoadSnapshotChunk { - uint64 height = 1; - uint32 format = 2; - uint32 chunk = 3; // Zero-indexed + uint64 height = 1; + uint32 format = 2; + uint32 chunk = 3; // Zero-indexed } message ResponseLoadSnapshotChunk { - bytes chunk = 1; + bytes chunk = 1; } // Applies a snapshot chunk message RequestApplySnapshotChunk { - bytes chunk = 1; -} + uint32 index = 1; + bytes chunk = 2; + string sender = 3; + } message ResponseApplySnapshotChunk { - bool applied = 1; - Reason reason = 2; // Reason why chunk failed + Result result = 1; + repeated uint32 refetch_chunks = 2; // Chunks to refetch and reapply (regardless of result) + repeated string reject_senders = 3; // Chunk senders to reject and ban (regardless of result) - enum Reason { // Reason why chunk failed - unknown = 0; // Unknown or generic reason - verify_failed = 1; // Snapshot verification failed - } + enum Result { + accept = 0; // Chunk successfully accepted + abort = 1; // Abort all snapshot restoration + retry = 2; // Retry chunk, combine with refetch and reject as appropriate + retry_snapshot = 3; // Retry snapshot, combine with refetch and reject as appropriate + reject_snapshot = 4; // Reject this snapshot, try a different one but keep sender rejections + } } ``` @@ -141,15 +153,15 @@ When starting an empty node with state sync and fast sync enabled, snapshots are 4. The node requests available snapshots via P2P from peers, via `RequestListSnapshots`. Peers will return the 10 most recent snapshots, one message per snapshot. -5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are `chunk_hashes` mismatches between different snapshots, the one hosted by the largest amount of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: +5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are mismatches between different snapshots, the one hosted by the largest amount of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: * The snapshot height's block is considered trustworthy by the light client (i.e. snapshot height is greater than trusted header and within unbonding period of the latest trustworthy block). 
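Before the remaining selection criteria, a minimal application-side sketch of the four message pairs defined above may help orient the reader. It serves a single in-memory snapshot; the Go type and field names are assumptions derived from the proto definitions (the production implementation lives in the Cosmos SDK):

```go
package example

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

// SnapshotApp serves one pre-computed snapshot from memory. A production
// application would stream chunks from disk and verify them on apply.
type SnapshotApp struct {
	abci.BaseApplication
	snapshot abci.Snapshot // metadata: height, format, chunks, hash
	chunks   [][]byte      // chunk payloads, indexed by chunk number
}

func (app *SnapshotApp) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots {
	return abci.ResponseListSnapshots{Snapshots: []*abci.Snapshot{&app.snapshot}}
}

func (app *SnapshotApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	// The zero Result value is accept, per the enum above. A real app would
	// return reject or reject_format for snapshots it cannot restore.
	return abci.ResponseOfferSnapshot{}
}

func (app *SnapshotApp) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
	if int(req.Chunk) >= len(app.chunks) {
		return abci.ResponseLoadSnapshotChunk{} // unknown chunk: empty response
	}
	return abci.ResponseLoadSnapshotChunk{Chunk: app.chunks[req.Chunk]}
}

func (app *SnapshotApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	// Restore req.Chunk into application state here. Zero Result = accept;
	// on verification failure, set refetch_chunks and/or reject_senders.
	return abci.ResponseApplySnapshotChunk{}
}
```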
- * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot` call (via `invalid_height` or `invalid_format`). + * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot`. * The application accepts the `RequestOfferSnapshot` call. -6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunk messages cannot exceed 16 MB. +6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`. Chunk messages cannot exceed 16 MB. 7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`. @@ -191,38 +203,6 @@ Taking consistent snapshots of IAVL trees is greatly simplified by them being ve Snapshots must also be garbage collected after some configurable time, e.g. by keeping the latest `n` snapshots. -## Experimental Prototype - -An experimental but functional state sync prototype is available in the `erik/statesync-prototype` branches of the Tendermint, IAVL, Cosmos SDK, and Gaia repositories. To fetch the necessary branches: - -```sh -$ mkdir statesync -$ cd statesync -$ git clone git@github.com:tendermint/tendermint -b erik/statesync-prototype -$ git clone git@github.com:tendermint/iavl -b erik/statesync-prototype -$ git clone git@github.com:cosmos/cosmos-sdk -b erik/statesync-prototype -$ git clone git@github.com:cosmos/gaia -b erik/statesync-prototype -``` - -To spin up three nodes of a four-node testnet: - -```sh -$ cd gaia -$ ./tools/start.sh -``` - -Wait for the first snapshot to be taken at height 3, then (in a separate terminal) start the fourth node with state sync enabled: - -```sh -$ ./tools/sync.sh -``` - -To stop the testnet, run: - -```sh -$ ./tools/stop.sh -``` - ## Resolved Questions * Is it OK for state-synced nodes to not have historical blocks nor historical IAVL versions? @@ -265,46 +245,6 @@ $ ./tools/stop.sh > No, just use a max message size. -## Implementation Plan - -### Core Tasks - -* **Tendermint:** light client P2P transport [#4456](https://github.com/tendermint/tendermint/issues/4456) - -* **IAVL:** export/import API [#210](https://github.com/tendermint/iavl/issues/210) - -* **Cosmos SDK:** snapshotting, scheduling, and pruning [#5689](https://github.com/cosmos/cosmos-sdk/issues/5689) - -* **Tendermint:** support starting with a truncated block history - -* **Tendermint:** state sync reactor and ABCI interface [#828](https://github.com/tendermint/tendermint/issues/828) - -* **Cosmos SDK:** snapshot ABCI implementation [#5690](https://github.com/cosmos/cosmos-sdk/issues/5690) - -### Nice-to-Haves - -* **Tendermint:** staged reactor startup (state sync → fast sync → block replay → wal replay → consensus) - - > Let's do a time-boxed prototype (a few days) and see how much work it will be. 
- - * Notify P2P peers about channel changes [#4394](https://github.com/tendermint/tendermint/issues/4394) - - * Check peers have certain channels [#1148](https://github.com/tendermint/tendermint/issues/1148) - -* **Tendermint:** prune blockchain history [#3652](https://github.com/tendermint/tendermint/issues/3652) - -* **Tendermint:** allow genesis to start from non-zero height [#2543](https://github.com/tendermint/tendermint/issues/2543) - -### Follow-up Tasks - -* **Tendermint:** light client verification for fast sync [#4457](https://github.com/tendermint/tendermint/issues/4457) - -* **Tendermint:** allow start with only blockstore [#3713](https://github.com/tendermint/tendermint/issues/3713) - -* **Tendermint:** node should go back to fast-syncing when lagging significantly [#129](https://github.com/tendermint/tendermint/issues/129) - -* **Tendermint:** backfill historical blocks [#4629](https://github.com/tendermint/tendermint/issues/4629) - ## Status Accepted diff --git a/go.mod b/go.mod index 9557b9e44..c048f94db 100644 --- a/go.mod +++ b/go.mod @@ -17,7 +17,7 @@ require ( github.com/libp2p/go-buffer-pool v0.0.2 github.com/magiconair/properties v1.8.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.1 + github.com/prometheus/client_golang v1.6.0 github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.7.0 github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa diff --git a/go.sum b/go.sum index 23e50c9e0..94e3f0105 100644 --- a/go.sum +++ b/go.sum @@ -325,8 +325,8 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0 h1:miYCvYqFXtl/J9FIy8eNpBfYthAEFg+Ys0XyUVEcDsc= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.6.0 h1:YVPodQOcK15POxhgARIvnDRVpLcuK8mglnMrWfyrw6A= +github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= @@ -355,6 +355,8 @@ github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7z github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8 h1:+fpWZdT24pJBiqJdAwYBjPSk+5YmQzYNPYzQsdzLkt8= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= +github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= +github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ 
-511,10 +513,11 @@ golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f h1:68K/z8GLUxV76xGSqwTWw2gyk/jwn79LUL43rES2g8o= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f h1:gWF768j/LaZugp8dyS4UwsslYCYz9XgFxvlgsn0n9H8= +golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= @@ -563,8 +566,6 @@ google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0 h1:bO/TA4OxCOummhSf10siHuG7vJOiwh7SpRpFZDkOgl4= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0 h1:2pJjwYOdkZ9HlN4sWRYBg9ttH5bCOlsueaM+b/oYjwo= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= diff --git a/node/node.go b/node/node.go index a59a3d19e..7623563d2 100644 --- a/node/node.go +++ b/node/node.go @@ -30,6 +30,7 @@ import ( "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" + lite "github.com/tendermint/tendermint/lite2" mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/p2p/pex" @@ -43,6 +44,7 @@ import ( "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/state/txindex/null" + "github.com/tendermint/tendermint/statesync" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -123,6 +125,12 @@ func DefaultMetricsProvider(config *cfg.InstrumentationConfig) MetricsProvider { // Option sets a parameter for the node. type Option func(*Node) +// Temporary interface for switching to fast sync, we should get rid of v0 and v1 reactors. +// See: https://github.com/tendermint/tendermint/issues/4595 +type fastSyncReactor interface { + SwitchToFastSync(sm.State) error +} + // CustomReactors allows you to add custom reactors (name -> p2p.Reactor) to // the node's Switch. 
// @@ -134,6 +142,7 @@ type Option func(*Node) // - CONSENSUS // - EVIDENCE // - PEX +// - STATESYNC func CustomReactors(reactors map[string]p2p.Reactor) Option { return func(n *Node) { for name, reactor := range reactors { @@ -147,6 +156,15 @@ func CustomReactors(reactors map[string]p2p.Reactor) Option { } } +// StateProvider overrides the state provider used by state sync to retrieve trusted app hashes and +// build a State object for bootstrapping the node. +// WARNING: this interface is considered unstable and subject to change. +func StateProvider(stateProvider statesync.StateProvider) Option { + return func(n *Node) { + n.stateSyncProvider = stateProvider + } +} + //------------------------------------------------------------------------------ // Node is the highest level interface to a full Tendermint node. @@ -168,21 +186,24 @@ type Node struct { isListening bool // services - eventBus *types.EventBus // pub/sub for services - stateDB dbm.DB - blockStore *store.BlockStore // store the blockchain to disk - bcReactor p2p.Reactor // for fast-syncing - mempoolReactor *mempl.Reactor // for gossipping transactions - mempool mempl.Mempool - consensusState *cs.State // latest consensus state - consensusReactor *cs.Reactor // for participating in the consensus - pexReactor *pex.Reactor // for exchanging peer addresses - evidencePool *evidence.Pool // tracking evidence - proxyApp proxy.AppConns // connection to the application - rpcListeners []net.Listener // rpc servers - txIndexer txindex.TxIndexer - indexerService *txindex.IndexerService - prometheusSrv *http.Server + eventBus *types.EventBus // pub/sub for services + stateDB dbm.DB + blockStore *store.BlockStore // store the blockchain to disk + bcReactor p2p.Reactor // for fast-syncing + mempoolReactor *mempl.Reactor // for gossipping transactions + mempool mempl.Mempool + stateSync bool // whether the node should state sync on startup + stateSyncReactor *statesync.Reactor // for hosting and restoring state sync snapshots + stateSyncProvider statesync.StateProvider // provides state data for bootstrapping a node + consensusState *cs.State // latest consensus state + consensusReactor *cs.Reactor // for participating in the consensus + pexReactor *pex.Reactor // for exchanging peer addresses + evidencePool *evidence.Pool // tracking evidence + proxyApp proxy.AppConns // connection to the application + rpcListeners []net.Listener // rpc servers + txIndexer txindex.TxIndexer + indexerService *txindex.IndexerService + prometheusSrv *http.Server } func initDBs(config *cfg.Config, dbProvider DBProvider) (blockStore *store.BlockStore, stateDB dbm.DB, err error) { @@ -369,7 +390,7 @@ func createConsensusReactor(config *cfg.Config, evidencePool *evidence.Pool, privValidator types.PrivValidator, csMetrics *cs.Metrics, - fastSync bool, + waitSync bool, eventBus *types.EventBus, consensusLogger log.Logger) (*consensus.Reactor, *consensus.State) { @@ -386,7 +407,7 @@ func createConsensusReactor(config *cfg.Config, if privValidator != nil { consensusState.SetPrivValidator(privValidator) } - consensusReactor := cs.NewReactor(consensusState, fastSync, cs.ReactorMetrics(csMetrics)) + consensusReactor := cs.NewReactor(consensusState, waitSync, cs.ReactorMetrics(csMetrics)) consensusReactor.SetLogger(consensusLogger) // services which will be publishing and/or subscribing for messages (events) // consensusReactor will set it on consensusState and blockExecutor @@ -469,6 +490,7 @@ func createSwitch(config *cfg.Config, peerFilters []p2p.PeerFilterFunc, 
mempoolReactor *mempl.Reactor, bcReactor p2p.Reactor, + stateSyncReactor *statesync.Reactor, consensusReactor *consensus.Reactor, evidenceReactor *evidence.Reactor, nodeInfo p2p.NodeInfo, @@ -486,6 +508,7 @@ func createSwitch(config *cfg.Config, sw.AddReactor("BLOCKCHAIN", bcReactor) sw.AddReactor("CONSENSUS", consensusReactor) sw.AddReactor("EVIDENCE", evidenceReactor) + sw.AddReactor("STATESYNC", stateSyncReactor) sw.SetNodeInfo(nodeInfo) sw.SetNodeKey(nodeKey) @@ -542,6 +565,59 @@ func createPEXReactorAndAddToSwitch(addrBook pex.AddrBook, config *cfg.Config, return pexReactor } +// startStateSync starts an asynchronous state sync process, then switches to fast sync mode. +func startStateSync(ssR *statesync.Reactor, bcR fastSyncReactor, conR *consensus.Reactor, + stateProvider statesync.StateProvider, config *cfg.StateSyncConfig, fastSync bool, + stateDB dbm.DB, blockStore *store.BlockStore) error { + ssR.Logger.Info("Starting state sync") + + state := sm.LoadState(stateDB) + if stateProvider == nil { + var err error + stateProvider, err = statesync.NewLightClientStateProvider(state.ChainID, state.Version, + config.RPCServers, lite.TrustOptions{ + Period: config.TrustPeriod, + Height: config.TrustHeight, + Hash: config.TrustHashBytes(), + }, ssR.Logger.With("module", "lite")) + if err != nil { + return fmt.Errorf("failed to set up light client state provider: %w", err) + } + } + + go func() { + state, commit, err := ssR.Sync(stateProvider) + if err != nil { + ssR.Logger.Error("State sync failed", "err", err) + return + } + err = sm.BootstrapState(stateDB, state) + if err != nil { + ssR.Logger.Error("Failed to bootstrap node with new state", "err", err) + return + } + err = blockStore.SaveSeenCommit(state.LastBlockHeight, commit) + if err != nil { + ssR.Logger.Error("Failed to store last seen commit", "err", err) + return + } + + if fastSync { + // FIXME Very ugly to have these metrics bleed through here. + conR.Metrics.StateSyncing.Set(0) + conR.Metrics.FastSyncing.Set(1) + err = bcR.SwitchToFastSync(state) + if err != nil { + ssR.Logger.Error("Failed to switch to fast sync", "err", err) + return + } + } else { + conR.SwitchToConsensus(state, true) + } + }() + return nil +} + // NewNode returns a new, ready to go, Tendermint Node. func NewNode(config *cfg.Config, privValidator types.PrivValidator, @@ -584,18 +660,6 @@ func NewNode(config *cfg.Config, return nil, err } - // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, - // and replays any blocks as necessary to sync tendermint with the app. - consensusLogger := logger.With("module", "consensus") - if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { - return nil, err - } - - // Reload the state. It will have the Version.Consensus.App set by the - // Handshake, and may have other modifications as well (ie. depending on - // what happened during block replay). - state = sm.LoadState(stateDB) - // If an address is provided, listen on the socket for a connection from an // external signing process. if config.PrivValidatorListenAddr != "" { @@ -611,11 +675,34 @@ func NewNode(config *cfg.Config, return nil, errors.Wrap(err, "can't get pubkey") } - logNodeStartupInfo(state, pubKey, logger, consensusLogger) - - // Decide whether to fast-sync or not + // Determine whether we should do state and/or fast sync. // We don't fast-sync when the only validator is us. 
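An aside before the sync-mode decision below: `startStateSync` above falls back to constructing the default light-client-backed state provider when none is injected. Operators embedding Tendermint can build one themselves and pass it in via the `StateProvider` option defined earlier in this diff. A sketch using only the calls visible in `startStateSync`, with placeholder servers and trust parameters:

```go
package example

import (
	"time"

	"github.com/tendermint/tendermint/libs/log"
	lite "github.com/tendermint/tendermint/lite2"
	sm "github.com/tendermint/tendermint/state"
	"github.com/tendermint/tendermint/statesync"
)

// buildStateProvider mirrors the default construction in startStateSync
// above. The result can be injected with the StateProvider node option,
// e.g. to supply pre-resolved trust parameters.
func buildStateProvider(state sm.State, trustHash []byte, logger log.Logger) (statesync.StateProvider, error) {
	return statesync.NewLightClientStateProvider(
		state.ChainID, state.Version,
		[]string{"http://host1:26657", "http://host2:26657"}, // placeholders
		lite.TrustOptions{
			Period: 168 * time.Hour, // keep well below the unbonding period
			Height: 1000,            // trusted height from a trusted source
			Hash:   trustHash,       // header hash at that height
		},
		logger.With("module", "lite"),
	)
}
```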
fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) + stateSync := config.StateSync.Enable && !onlyValidatorIsUs(state, pubKey) + if stateSync && state.LastBlockHeight > 0 { + logger.Info("Found local state with non-zero height, skipping state sync") + stateSync = false + } + // Don't check fastSync == true, since the v2 reactor has a bug where it fast syncs regardless. + if stateSync && config.FastSync.Version == "v2" { + return nil, errors.New("state sync is not supported with blockchain v2 reactor") + } + + // Create the handshaker, which calls RequestInfo, sets the AppVersion on the state, + // and replays any blocks as necessary to sync tendermint with the app. + consensusLogger := logger.With("module", "consensus") + if !stateSync { + if err := doHandshake(stateDB, state, blockStore, genDoc, eventBus, proxyApp, consensusLogger); err != nil { + return nil, err + } + + // Reload the state. It will have the Version.Consensus.App set by the + // Handshake, and may have other modifications as well (ie. depending on + // what happened during block replay). + state = sm.LoadState(stateDB) + } + + logNodeStartupInfo(state, pubKey, logger, consensusLogger) csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) @@ -638,18 +725,32 @@ func NewNode(config *cfg.Config, sm.BlockExecutorWithMetrics(smMetrics), ) - // Make BlockchainReactor - bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync, logger) + // Make BlockchainReactor. Don't start fast sync if we're doing a state sync first. + bcReactor, err := createBlockchainReactor(config, state, blockExec, blockStore, fastSync && !stateSync, logger) if err != nil { return nil, errors.Wrap(err, "could not create blockchain reactor") } - // Make ConsensusReactor + // Make ConsensusReactor. Don't enable fully if doing a state sync and/or fast sync first. + // FIXME We need to update metrics here, since other reactors don't have access to them. + if stateSync { + csMetrics.StateSyncing.Set(1) + } else if fastSync { + csMetrics.FastSyncing.Set(1) + } consensusReactor, consensusState := createConsensusReactor( config, state, blockExec, blockStore, mempool, evidencePool, - privValidator, csMetrics, fastSync, eventBus, consensusLogger, + privValidator, csMetrics, stateSync || fastSync, eventBus, consensusLogger, ) + // Set up state sync reactor, and schedule a sync if requested. + // FIXME The way we do phased startups (e.g. replay -> fast sync -> consensus) is very messy, + // we should clean this whole thing up. 
See: + // https://github.com/tendermint/tendermint/issues/4644 + stateSyncReactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(), + config.StateSync.TempDir) + stateSyncReactor.SetLogger(logger.With("module", "statesync")) + nodeInfo, err := makeNodeInfo(config, nodeKey, txIndexer, genDoc, state) if err != nil { return nil, err @@ -662,7 +763,7 @@ func NewNode(config *cfg.Config, p2pLogger := logger.With("module", "p2p") sw := createSwitch( config, transport, p2pMetrics, peerFilters, mempoolReactor, bcReactor, - consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, + stateSyncReactor, consensusReactor, evidenceReactor, nodeInfo, nodeKey, p2pLogger, ) err = sw.AddPersistentPeers(splitAndTrimEmpty(config.P2P.PersistentPeers, ",", " ")) @@ -721,6 +822,8 @@ func NewNode(config *cfg.Config, mempool: mempool, consensusState: consensusState, consensusReactor: consensusReactor, + stateSyncReactor: stateSyncReactor, + stateSync: stateSync, pexReactor: pexReactor, evidencePool: evidencePool, proxyApp: proxyApp, @@ -791,6 +894,19 @@ func (n *Node) OnStart() error { return errors.Wrap(err, "could not dial peers from persistent_peers field") } + // Run state sync + if n.stateSync { + bcR, ok := n.bcReactor.(fastSyncReactor) + if !ok { + return fmt.Errorf("this blockchain reactor does not support switching from state sync") + } + err := startStateSync(n.stateSyncReactor, bcR, n.consensusReactor, n.stateSyncProvider, + n.config.StateSync, n.config.FastSyncMode, n.stateDB, n.blockStore) + if err != nil { + return fmt.Errorf("failed to start state sync: %w", err) + } + } + return nil } @@ -1106,6 +1222,7 @@ func makeNodeInfo( cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, mempl.MempoolChannel, evidence.EvidenceChannel, + statesync.SnapshotChannel, statesync.ChunkChannel, }, Moniker: config.Moniker, Other: p2p.DefaultNodeInfoOther{ diff --git a/p2p/mocks/peer.go b/p2p/mocks/peer.go new file mode 100644 index 000000000..f874e0405 --- /dev/null +++ b/p2p/mocks/peer.go @@ -0,0 +1,331 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + log "github.com/tendermint/tendermint/libs/log" + conn "github.com/tendermint/tendermint/p2p/conn" + + mock "github.com/stretchr/testify/mock" + + net "net" + + p2p "github.com/tendermint/tendermint/p2p" +) + +// Peer is an autogenerated mock type for the Peer type +type Peer struct { + mock.Mock +} + +// CloseConn provides a mock function with given fields: +func (_m *Peer) CloseConn() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FlushStop provides a mock function with given fields: +func (_m *Peer) FlushStop() { + _m.Called() +} + +// Get provides a mock function with given fields: _a0 +func (_m *Peer) Get(_a0 string) interface{} { + ret := _m.Called(_a0) + + var r0 interface{} + if rf, ok := ret.Get(0).(func(string) interface{}); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + return r0 +} + +// ID provides a mock function with given fields: +func (_m *Peer) ID() p2p.ID { + ret := _m.Called() + + var r0 p2p.ID + if rf, ok := ret.Get(0).(func() p2p.ID); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(p2p.ID) + } + + return r0 +} + +// IsOutbound provides a mock function with given fields: +func (_m *Peer) IsOutbound() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsPersistent provides a mock function with given fields: +func (_m *Peer) IsPersistent() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// IsRunning provides a mock function with given fields: +func (_m *Peer) IsRunning() bool { + ret := _m.Called() + + var r0 bool + if rf, ok := ret.Get(0).(func() bool); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// NodeInfo provides a mock function with given fields: +func (_m *Peer) NodeInfo() p2p.NodeInfo { + ret := _m.Called() + + var r0 p2p.NodeInfo + if rf, ok := ret.Get(0).(func() p2p.NodeInfo); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(p2p.NodeInfo) + } + } + + return r0 +} + +// OnReset provides a mock function with given fields: +func (_m *Peer) OnReset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStart provides a mock function with given fields: +func (_m *Peer) OnStart() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// OnStop provides a mock function with given fields: +func (_m *Peer) OnStop() { + _m.Called() +} + +// Quit provides a mock function with given fields: +func (_m *Peer) Quit() <-chan struct{} { + ret := _m.Called() + + var r0 <-chan struct{} + if rf, ok := ret.Get(0).(func() <-chan struct{}); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(<-chan struct{}) + } + } + + return r0 +} + +// RemoteAddr provides a mock function with given fields: +func (_m *Peer) RemoteAddr() net.Addr { + ret := _m.Called() + + var r0 net.Addr + if rf, ok := ret.Get(0).(func() net.Addr); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.Addr) + } + } + + return r0 +} + +// RemoteIP provides a mock function with given 
fields: +func (_m *Peer) RemoteIP() net.IP { + ret := _m.Called() + + var r0 net.IP + if rf, ok := ret.Get(0).(func() net.IP); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(net.IP) + } + } + + return r0 +} + +// Reset provides a mock function with given fields: +func (_m *Peer) Reset() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Send provides a mock function with given fields: _a0, _a1 +func (_m *Peer) Send(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} + +// Set provides a mock function with given fields: _a0, _a1 +func (_m *Peer) Set(_a0 string, _a1 interface{}) { + _m.Called(_a0, _a1) +} + +// SetLogger provides a mock function with given fields: _a0 +func (_m *Peer) SetLogger(_a0 log.Logger) { + _m.Called(_a0) +} + +// SocketAddr provides a mock function with given fields: +func (_m *Peer) SocketAddr() *p2p.NetAddress { + ret := _m.Called() + + var r0 *p2p.NetAddress + if rf, ok := ret.Get(0).(func() *p2p.NetAddress); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*p2p.NetAddress) + } + } + + return r0 +} + +// Start provides a mock function with given fields: +func (_m *Peer) Start() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Status provides a mock function with given fields: +func (_m *Peer) Status() conn.ConnectionStatus { + ret := _m.Called() + + var r0 conn.ConnectionStatus + if rf, ok := ret.Get(0).(func() conn.ConnectionStatus); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(conn.ConnectionStatus) + } + + return r0 +} + +// Stop provides a mock function with given fields: +func (_m *Peer) Stop() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// String provides a mock function with given fields: +func (_m *Peer) String() string { + ret := _m.Called() + + var r0 string + if rf, ok := ret.Get(0).(func() string); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(string) + } + + return r0 +} + +// TrySend provides a mock function with given fields: _a0, _a1 +func (_m *Peer) TrySend(_a0 byte, _a1 []byte) bool { + ret := _m.Called(_a0, _a1) + + var r0 bool + if rf, ok := ret.Get(0).(func(byte, []byte) bool); ok { + r0 = rf(_a0, _a1) + } else { + r0 = ret.Get(0).(bool) + } + + return r0 +} diff --git a/p2p/peer.go b/p2p/peer.go index 7a6d6f868..35727cbf6 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -12,6 +12,8 @@ import ( tmconn "github.com/tendermint/tendermint/p2p/conn" ) +//go:generate mockery -case underscore -name Peer + const metricsTickerDuration = 10 * time.Second // Peer is an interface representing a peer connected on a reactor. 
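The `//go:generate` directive above is what produces the `p2p/mocks` package added in this diff. In tests, the generated mock is driven through the standard testify API; a small sketch (the channel ID `0x60` and the peer ID are placeholders):

```go
package p2p_test

import (
	"testing"

	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/mocks"
)

// TestPeerMockUsage shows the generated Peer mock standing in for a real
// peer: expectations are declared up front and asserted at the end.
func TestPeerMockUsage(t *testing.T) {
	peer := &mocks.Peer{}
	peer.On("ID").Return(p2p.ID("a1b2c3d4"))
	peer.On("Send", byte(0x60), mock.Anything).Return(true)

	// Code under test would normally make these calls.
	require.Equal(t, p2p.ID("a1b2c3d4"), peer.ID())
	require.True(t, peer.Send(0x60, []byte("chunk request")))

	peer.AssertExpectations(t)
}
```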
diff --git a/proxy/app_conn.go b/proxy/app_conn.go index 066d17295..32e647cd6 100644 --- a/proxy/app_conn.go +++ b/proxy/app_conn.go @@ -5,6 +5,8 @@ import ( "github.com/tendermint/tendermint/abci/types" ) +//go:generate mockery -case underscore -name AppConnConsensus|AppConnMempool|AppConnQuery|AppConnSnapshot + //---------------------------------------------------------------------------------------- // Enforce which abci msgs can be sent on a connection at the type level @@ -40,6 +42,15 @@ type AppConnQuery interface { // SetOptionSync(key string, value string) (res types.Result) } +type AppConnSnapshot interface { + Error() error + + ListSnapshotsSync(types.RequestListSnapshots) (*types.ResponseListSnapshots, error) + OfferSnapshotSync(types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) + LoadSnapshotChunkSync(types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) + ApplySnapshotChunkSync(types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) +} + //----------------------------------------------------------------------------------------- // Implements AppConnConsensus (subset of abcicli.Client) @@ -142,3 +153,38 @@ func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, e func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { return app.appConn.QuerySync(reqQuery) } + +//------------------------------------------------ +// Implements AppConnSnapshot (subset of abcicli.Client) + +type appConnSnapshot struct { + appConn abcicli.Client +} + +func NewAppConnSnapshot(appConn abcicli.Client) AppConnSnapshot { + return &appConnSnapshot{ + appConn: appConn, + } +} + +func (app *appConnSnapshot) Error() error { + return app.appConn.Error() +} + +func (app *appConnSnapshot) ListSnapshotsSync(req types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + return app.appConn.ListSnapshotsSync(req) +} + +func (app *appConnSnapshot) OfferSnapshotSync(req types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + return app.appConn.OfferSnapshotSync(req) +} + +func (app *appConnSnapshot) LoadSnapshotChunkSync( + req types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + return app.appConn.LoadSnapshotChunkSync(req) +} + +func (app *appConnSnapshot) ApplySnapshotChunkSync( + req types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + return app.appConn.ApplySnapshotChunkSync(req) +} diff --git a/proxy/mocks/app_conn_consensus.go b/proxy/mocks/app_conn_consensus.go new file mode 100644 index 000000000..3727c372f --- /dev/null +++ b/proxy/mocks/app_conn_consensus.go @@ -0,0 +1,142 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + abcicli "github.com/tendermint/tendermint/abci/client" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnConsensus is an autogenerated mock type for the AppConnConsensus type +type AppConnConsensus struct { + mock.Mock +} + +// BeginBlockSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) BeginBlockSync(_a0 types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseBeginBlock + if rf, ok := ret.Get(0).(func(types.RequestBeginBlock) *types.ResponseBeginBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseBeginBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestBeginBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// CommitSync provides a mock function with given fields: +func (_m *AppConnConsensus) CommitSync() (*types.ResponseCommit, error) { + ret := _m.Called() + + var r0 *types.ResponseCommit + if rf, ok := ret.Get(0).(func() *types.ResponseCommit); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseCommit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func() error); ok { + r1 = rf() + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// DeliverTxAsync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) DeliverTxAsync(_a0 types.RequestDeliverTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestDeliverTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// EndBlockSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) EndBlockSync(_a0 types.RequestEndBlock) (*types.ResponseEndBlock, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseEndBlock + if rf, ok := ret.Get(0).(func(types.RequestEndBlock) *types.ResponseEndBlock); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEndBlock) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestEndBlock) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnConsensus) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InitChainSync provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) InitChainSync(_a0 types.RequestInitChain) (*types.ResponseInitChain, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInitChain + if rf, ok := ret.Get(0).(func(types.RequestInitChain) *types.ResponseInitChain); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInitChain) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInitChain) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// SetResponseCallback provides a mock function with given fields: _a0 +func (_m *AppConnConsensus) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) +} diff --git a/proxy/mocks/app_conn_mempool.go b/proxy/mocks/app_conn_mempool.go new file mode 100644 index 000000000..e98fbe465 --- /dev/null +++ b/proxy/mocks/app_conn_mempool.go @@ 
-0,0 +1,80 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + abcicli "github.com/tendermint/tendermint/abci/client" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnMempool is an autogenerated mock type for the AppConnMempool type +type AppConnMempool struct { + mock.Mock +} + +// CheckTxAsync provides a mock function with given fields: _a0 +func (_m *AppConnMempool) CheckTxAsync(_a0 types.RequestCheckTx) *abcicli.ReqRes { + ret := _m.Called(_a0) + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func(types.RequestCheckTx) *abcicli.ReqRes); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// Error provides a mock function with given fields: +func (_m *AppConnMempool) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// FlushAsync provides a mock function with given fields: +func (_m *AppConnMempool) FlushAsync() *abcicli.ReqRes { + ret := _m.Called() + + var r0 *abcicli.ReqRes + if rf, ok := ret.Get(0).(func() *abcicli.ReqRes); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*abcicli.ReqRes) + } + } + + return r0 +} + +// FlushSync provides a mock function with given fields: +func (_m *AppConnMempool) FlushSync() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// SetResponseCallback provides a mock function with given fields: _a0 +func (_m *AppConnMempool) SetResponseCallback(_a0 abcicli.Callback) { + _m.Called(_a0) +} diff --git a/proxy/mocks/app_conn_query.go b/proxy/mocks/app_conn_query.go new file mode 100644 index 000000000..5274a72b9 --- /dev/null +++ b/proxy/mocks/app_conn_query.go @@ -0,0 +1,97 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnQuery is an autogenerated mock type for the AppConnQuery type +type AppConnQuery struct { + mock.Mock +} + +// EchoSync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) EchoSync(_a0 string) (*types.ResponseEcho, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseEcho + if rf, ok := ret.Get(0).(func(string) *types.ResponseEcho); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseEcho) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(string) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnQuery) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// InfoSync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) InfoSync(_a0 types.RequestInfo) (*types.ResponseInfo, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseInfo + if rf, ok := ret.Get(0).(func(types.RequestInfo) *types.ResponseInfo); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseInfo) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestInfo) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QuerySync provides a mock function with given fields: _a0 +func (_m *AppConnQuery) QuerySync(_a0 types.RequestQuery) (*types.ResponseQuery, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseQuery + if rf, ok := ret.Get(0).(func(types.RequestQuery) *types.ResponseQuery); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseQuery) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestQuery) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/proxy/mocks/app_conn_snapshot.go b/proxy/mocks/app_conn_snapshot.go new file mode 100644 index 000000000..e5ad235cf --- /dev/null +++ b/proxy/mocks/app_conn_snapshot.go @@ -0,0 +1,120 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. 
+ +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + + types "github.com/tendermint/tendermint/abci/types" +) + +// AppConnSnapshot is an autogenerated mock type for the AppConnSnapshot type +type AppConnSnapshot struct { + mock.Mock +} + +// ApplySnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) ApplySnapshotChunkSync(_a0 types.RequestApplySnapshotChunk) (*types.ResponseApplySnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseApplySnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestApplySnapshotChunk) *types.ResponseApplySnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseApplySnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestApplySnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Error provides a mock function with given fields: +func (_m *AppConnSnapshot) Error() error { + ret := _m.Called() + + var r0 error + if rf, ok := ret.Get(0).(func() error); ok { + r0 = rf() + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// ListSnapshotsSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) ListSnapshotsSync(_a0 types.RequestListSnapshots) (*types.ResponseListSnapshots, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseListSnapshots + if rf, ok := ret.Get(0).(func(types.RequestListSnapshots) *types.ResponseListSnapshots); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseListSnapshots) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestListSnapshots) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// LoadSnapshotChunkSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) LoadSnapshotChunkSync(_a0 types.RequestLoadSnapshotChunk) (*types.ResponseLoadSnapshotChunk, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseLoadSnapshotChunk + if rf, ok := ret.Get(0).(func(types.RequestLoadSnapshotChunk) *types.ResponseLoadSnapshotChunk); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseLoadSnapshotChunk) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestLoadSnapshotChunk) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// OfferSnapshotSync provides a mock function with given fields: _a0 +func (_m *AppConnSnapshot) OfferSnapshotSync(_a0 types.RequestOfferSnapshot) (*types.ResponseOfferSnapshot, error) { + ret := _m.Called(_a0) + + var r0 *types.ResponseOfferSnapshot + if rf, ok := ret.Get(0).(func(types.RequestOfferSnapshot) *types.ResponseOfferSnapshot); ok { + r0 = rf(_a0) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.ResponseOfferSnapshot) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(types.RequestOfferSnapshot) error); ok { + r1 = rf(_a0) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go index 364114115..753d9a55e 100644 --- a/proxy/multi_app_conn.go +++ b/proxy/multi_app_conn.go @@ -1,7 +1,7 @@ package proxy import ( - "github.com/pkg/errors" + "fmt" "github.com/tendermint/tendermint/libs/service" ) @@ -15,6 +15,7 @@ type AppConns interface { Mempool() AppConnMempool Consensus() AppConnConsensus Query() AppConnQuery + Snapshot() AppConnSnapshot } func NewAppConns(clientCreator ClientCreator) AppConns { @@ 
-33,6 +34,7 @@ type multiAppConn struct {
	mempoolConn   AppConnMempool
	consensusConn AppConnConsensus
	queryConn     AppConnQuery
+	snapshotConn  AppConnSnapshot

	clientCreator ClientCreator
}
@@ -61,37 +63,53 @@ func (app *multiAppConn) Query() AppConnQuery {
	return app.queryConn
}

+// Returns the snapshot connection
+func (app *multiAppConn) Snapshot() AppConnSnapshot {
+	return app.snapshotConn
+}
+
func (app *multiAppConn) OnStart() error {
	// query connection
	querycli, err := app.clientCreator.NewABCIClient()
	if err != nil {
-		return errors.Wrap(err, "Error creating ABCI client (query connection)")
+		return fmt.Errorf("error creating ABCI client (query connection): %w", err)
	}
	querycli.SetLogger(app.Logger.With("module", "abci-client", "connection", "query"))
	if err := querycli.Start(); err != nil {
-		return errors.Wrap(err, "Error starting ABCI client (query connection)")
+		return fmt.Errorf("error starting ABCI client (query connection): %w", err)
	}
	app.queryConn = NewAppConnQuery(querycli)

+	// snapshot connection
+	snapshotcli, err := app.clientCreator.NewABCIClient()
+	if err != nil {
+		return fmt.Errorf("error creating ABCI client (snapshot connection): %w", err)
+	}
+	snapshotcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "snapshot"))
+	if err := snapshotcli.Start(); err != nil {
+		return fmt.Errorf("error starting ABCI client (snapshot connection): %w", err)
+	}
+	app.snapshotConn = NewAppConnSnapshot(snapshotcli)
+
	// mempool connection
	memcli, err := app.clientCreator.NewABCIClient()
	if err != nil {
-		return errors.Wrap(err, "Error creating ABCI client (mempool connection)")
+		return fmt.Errorf("error creating ABCI client (mempool connection): %w", err)
	}
	memcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "mempool"))
	if err := memcli.Start(); err != nil {
-		return errors.Wrap(err, "Error starting ABCI client (mempool connection)")
+		return fmt.Errorf("error starting ABCI client (mempool connection): %w", err)
	}
	app.mempoolConn = NewAppConnMempool(memcli)

	// consensus connection
	concli, err := app.clientCreator.NewABCIClient()
	if err != nil {
-		return errors.Wrap(err, "Error creating ABCI client (consensus connection)")
+		return fmt.Errorf("error creating ABCI client (consensus connection): %w", err)
	}
	concli.SetLogger(app.Logger.With("module", "abci-client", "connection", "consensus"))
	if err := concli.Start(); err != nil {
-		return errors.Wrap(err, "Error starting ABCI client (consensus connection)")
+		return fmt.Errorf("error starting ABCI client (consensus connection): %w", err)
	}
	app.consensusConn = NewAppConnConsensus(concli)
diff --git a/rpc/core/status.go b/rpc/core/status.go
index 4e950d4a3..67c43ea0d 100644
--- a/rpc/core/status.go
+++ b/rpc/core/status.go
@@ -31,7 +31,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
	}

	var latestHeight int64
-	if consensusReactor.FastSync() {
+	if consensusReactor.WaitSync() {
		latestHeight = blockStore.Height()
	} else {
		latestHeight = consensusState.GetLastHeight()
@@ -66,7 +66,7 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) {
			EarliestAppHash:     earliestAppHash,
			EarliestBlockHeight: earliestBlockHeight,
			EarliestBlockTime:   time.Unix(0, earliestBlockTimeNano),
-			CatchingUp:          consensusReactor.FastSync(),
+			CatchingUp:          consensusReactor.WaitSync(),
		},
		ValidatorInfo: ctypes.ValidatorInfo{
			Address: pubKey.Address(),
diff --git a/state/store.go b/state/store.go
index 149fd595b..35e239731 100644
--- a/state/store.go
+++ b/state/store.go
@@ -117,6 +117,16 @@ func 
saveState(db dbm.DB, state State, key []byte) { } } +// BootstrapState saves a new state, used e.g. by state sync when starting from non-zero height. +func BootstrapState(db dbm.DB, state State) error { + height := state.LastBlockHeight + saveValidatorsInfo(db, height, height, state.LastValidators) + saveValidatorsInfo(db, height+1, height+1, state.Validators) + saveValidatorsInfo(db, height+2, height+2, state.NextValidators) + saveConsensusParamsInfo(db, height+1, height+1, state.ConsensusParams) + return db.SetSync(stateKey, state.Bytes()) +} + //------------------------------------------------------------------------ // ABCIResponses retains the responses diff --git a/statesync/chunks.go b/statesync/chunks.go new file mode 100644 index 000000000..bfbcdb8d8 --- /dev/null +++ b/statesync/chunks.go @@ -0,0 +1,321 @@ +package statesync + +import ( + "errors" + "fmt" + "io/ioutil" + "os" + "path/filepath" + "strconv" + "sync" + "time" + + "github.com/tendermint/tendermint/p2p" +) + +// errDone is returned by chunkQueue.Next() when all chunks have been returned. +var errDone = errors.New("chunk queue has completed") + +// chunk contains data for a chunk. +type chunk struct { + Height uint64 + Format uint32 + Index uint32 + Chunk []byte + Sender p2p.ID +} + +// chunkQueue manages chunks for a state sync process, ordering them if requested. It acts as an +// iterator over all chunks, but callers can request chunks to be retried, optionally after +// refetching. +type chunkQueue struct { + sync.Mutex + snapshot *snapshot // if this is nil, the queue has been closed + dir string // temp dir for on-disk chunk storage + chunkFiles map[uint32]string // path to temporary chunk file + chunkSenders map[uint32]p2p.ID // the peer who sent the given chunk + chunkAllocated map[uint32]bool // chunks that have been allocated via Allocate() + chunkReturned map[uint32]bool // chunks returned via Next() + waiters map[uint32][]chan<- uint32 // signals WaitFor() waiters about chunk arrival +} + +// newChunkQueue creates a new chunk queue for a snapshot, using a temp dir for storage. +// Callers must call Close() when done. +func newChunkQueue(snapshot *snapshot, tempDir string) (*chunkQueue, error) { + dir, err := ioutil.TempDir(tempDir, "tm-statesync") + if err != nil { + return nil, fmt.Errorf("unable to create temp dir for state sync chunks: %w", err) + } + if snapshot.Chunks == 0 { + return nil, errors.New("snapshot has no chunks") + } + return &chunkQueue{ + snapshot: snapshot, + dir: dir, + chunkFiles: make(map[uint32]string, snapshot.Chunks), + chunkSenders: make(map[uint32]p2p.ID, snapshot.Chunks), + chunkAllocated: make(map[uint32]bool, snapshot.Chunks), + chunkReturned: make(map[uint32]bool, snapshot.Chunks), + waiters: make(map[uint32][]chan<- uint32), + }, nil +} + +// Add adds a chunk to the queue. It ignores chunks that already exist, returning false. 
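+// A typical use, with fields taken from a received chunkResponseMessage (sketch;
+// the reactor feeds received chunks through the syncer into this queue):
+//
+//	added, err := queue.Add(&chunk{
+//		Height: msg.Height, Format: msg.Format, Index: msg.Index,
+//		Chunk:  msg.Chunk, Sender: src.ID(),
+//	})
+//	// added == false means a duplicate chunk or a closed queue, not an error.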
+func (q *chunkQueue) Add(chunk *chunk) (bool, error) { + if chunk == nil || chunk.Chunk == nil { + return false, errors.New("cannot add nil chunk") + } + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return false, nil // queue is closed + } + if chunk.Height != q.snapshot.Height { + return false, fmt.Errorf("invalid chunk height %v, expected %v", chunk.Height, q.snapshot.Height) + } + if chunk.Format != q.snapshot.Format { + return false, fmt.Errorf("invalid chunk format %v, expected %v", chunk.Format, q.snapshot.Format) + } + if chunk.Index >= q.snapshot.Chunks { + return false, fmt.Errorf("received unexpected chunk %v", chunk.Index) + } + if q.chunkFiles[chunk.Index] != "" { + return false, nil + } + + path := filepath.Join(q.dir, strconv.FormatUint(uint64(chunk.Index), 10)) + err := ioutil.WriteFile(path, chunk.Chunk, 0644) + if err != nil { + return false, fmt.Errorf("failed to save chunk %v to file %v: %w", chunk.Index, path, err) + } + q.chunkFiles[chunk.Index] = path + q.chunkSenders[chunk.Index] = chunk.Sender + + // Signal any waiters that the chunk has arrived. + for _, waiter := range q.waiters[chunk.Index] { + waiter <- chunk.Index + close(waiter) + } + delete(q.waiters, chunk.Index) + + return true, nil +} + +// Allocate allocates a chunk to the caller, making it responsible for fetching it. Returns +// errDone once no chunks are left or the queue is closed. +func (q *chunkQueue) Allocate() (uint32, error) { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return 0, errDone + } + if uint32(len(q.chunkAllocated)) >= q.snapshot.Chunks { + return 0, errDone + } + for i := uint32(0); i < q.snapshot.Chunks; i++ { + if !q.chunkAllocated[i] { + q.chunkAllocated[i] = true + return i, nil + } + } + return 0, errDone +} + +// Close closes the chunk queue, cleaning up all temporary files. +func (q *chunkQueue) Close() error { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return nil + } + for _, waiters := range q.waiters { + for _, waiter := range waiters { + close(waiter) + } + } + q.waiters = nil + q.snapshot = nil + err := os.RemoveAll(q.dir) + if err != nil { + return fmt.Errorf("failed to clean up state sync tempdir %v: %w", q.dir, err) + } + return nil +} + +// Discard discards a chunk. It will be removed from the queue, available for allocation, and can +// be added and returned via Next() again. If the chunk is not already in the queue this does +// nothing, to avoid it being allocated to multiple fetchers. +func (q *chunkQueue) Discard(index uint32) error { + q.Lock() + defer q.Unlock() + return q.discard(index) +} + +// discard discards a chunk, scheduling it for refetching. The caller must hold the mutex lock. +func (q *chunkQueue) discard(index uint32) error { + if q.snapshot == nil { + return nil + } + path := q.chunkFiles[index] + if path == "" { + return nil + } + err := os.Remove(path) + if err != nil { + return fmt.Errorf("failed to remove chunk %v: %w", index, err) + } + delete(q.chunkFiles, index) + delete(q.chunkReturned, index) + delete(q.chunkAllocated, index) + return nil +} + +// DiscardSender discards all *unreturned* chunks from a given sender. If the caller wants to +// discard already returned chunks, this can be done via Discard(). 
+func (q *chunkQueue) DiscardSender(peerID p2p.ID) error { + q.Lock() + defer q.Unlock() + + for index, sender := range q.chunkSenders { + if sender == peerID && !q.chunkReturned[index] { + err := q.discard(index) + if err != nil { + return err + } + delete(q.chunkSenders, index) + } + } + return nil +} + +// GetSender returns the sender of the chunk with the given index, or empty if not found. +func (q *chunkQueue) GetSender(index uint32) p2p.ID { + q.Lock() + defer q.Unlock() + return q.chunkSenders[index] +} + +// Has checks whether a chunk exists in the queue. +func (q *chunkQueue) Has(index uint32) bool { + q.Lock() + defer q.Unlock() + return q.chunkFiles[index] != "" +} + +// load loads a chunk from disk, or nil if the chunk is not in the queue. The caller must hold the +// mutex lock. +func (q *chunkQueue) load(index uint32) (*chunk, error) { + path, ok := q.chunkFiles[index] + if !ok { + return nil, nil + } + body, err := ioutil.ReadFile(path) + if err != nil { + return nil, fmt.Errorf("failed to load chunk %v: %w", index, err) + } + return &chunk{ + Height: q.snapshot.Height, + Format: q.snapshot.Format, + Index: index, + Chunk: body, + Sender: q.chunkSenders[index], + }, nil +} + +// Next returns the next chunk from the queue, or errDone if all chunks have been returned. It +// blocks until the chunk is available. Concurrent Next() calls may return the same chunk. +func (q *chunkQueue) Next() (*chunk, error) { + q.Lock() + var chunk *chunk + index, err := q.nextUp() + if err == nil { + chunk, err = q.load(index) + if err == nil { + q.chunkReturned[index] = true + } + } + q.Unlock() + if chunk != nil || err != nil { + return chunk, err + } + + select { + case _, ok := <-q.WaitFor(index): + if !ok { + return nil, errDone // queue closed + } + case <-time.After(chunkTimeout): + return nil, errTimeout + } + + q.Lock() + defer q.Unlock() + chunk, err = q.load(index) + if err != nil { + return nil, err + } + q.chunkReturned[index] = true + return chunk, nil +} + +// nextUp returns the next chunk to be returned, or errDone if all chunks have been returned. The +// caller must hold the mutex lock. +func (q *chunkQueue) nextUp() (uint32, error) { + if q.snapshot == nil { + return 0, errDone + } + for i := uint32(0); i < q.snapshot.Chunks; i++ { + if !q.chunkReturned[i] { + return i, nil + } + } + return 0, errDone +} + +// Retry schedules a chunk to be retried, without refetching it. +func (q *chunkQueue) Retry(index uint32) { + q.Lock() + defer q.Unlock() + delete(q.chunkReturned, index) +} + +// RetryAll schedules all chunks to be retried, without refetching them. +func (q *chunkQueue) RetryAll() { + q.Lock() + defer q.Unlock() + q.chunkReturned = make(map[uint32]bool) +} + +// Size returns the total number of chunks for the snapshot and queue, or 0 when closed. +func (q *chunkQueue) Size() uint32 { + q.Lock() + defer q.Unlock() + if q.snapshot == nil { + return 0 + } + return q.snapshot.Chunks +} + +// WaitFor returns a channel that receives a chunk index when it arrives in the queue, or +// immediately if it has already arrived. The channel is closed without a value if the queue is +// closed or if the chunk index is not valid. 
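+// Callers typically combine it with a timeout, as Next() does above (sketch):
+//
+//	select {
+//	case _, ok := <-q.WaitFor(index):
+//		if !ok {
+//			return nil, errDone // queue closed
+//		}
+//	case <-time.After(chunkTimeout):
+//		return nil, errTimeout
+//	}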
+func (q *chunkQueue) WaitFor(index uint32) <-chan uint32 { + q.Lock() + defer q.Unlock() + ch := make(chan uint32, 1) + switch { + case q.snapshot == nil: + close(ch) + case index >= q.snapshot.Chunks: + close(ch) + case q.chunkFiles[index] != "": + ch <- index + close(ch) + default: + if q.waiters[index] == nil { + q.waiters[index] = make([]chan<- uint32, 0) + } + q.waiters[index] = append(q.waiters[index], ch) + } + return ch +} diff --git a/statesync/chunks_test.go b/statesync/chunks_test.go new file mode 100644 index 000000000..2b9a5d751 --- /dev/null +++ b/statesync/chunks_test.go @@ -0,0 +1,551 @@ +package statesync + +import ( + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/p2p" +) + +func setupChunkQueue(t *testing.T) (*chunkQueue, func()) { + snapshot := &snapshot{ + Height: 3, + Format: 1, + Chunks: 5, + Hash: []byte{7}, + Metadata: nil, + } + queue, err := newChunkQueue(snapshot, "") + require.NoError(t, err) + teardown := func() { + err := queue.Close() + require.NoError(t, err) + } + return queue, teardown +} + +func TestNewChunkQueue_TempDir(t *testing.T) { + snapshot := &snapshot{ + Height: 3, + Format: 1, + Chunks: 5, + Hash: []byte{7}, + Metadata: nil, + } + dir, err := ioutil.TempDir("", "newchunkqueue") + require.NoError(t, err) + defer os.RemoveAll(dir) + queue, err := newChunkQueue(snapshot, dir) + require.NoError(t, err) + + files, err := ioutil.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, files, 1) + + err = queue.Close() + require.NoError(t, err) + + files, err = ioutil.ReadDir(dir) + require.NoError(t, err) + assert.Len(t, files, 0) +} + +func TestChunkQueue(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Adding the first chunk should be fine + added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.True(t, added) + + // Adding the last chunk should also be fine + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) + require.NoError(t, err) + assert.True(t, added) + + // Adding the first or last chunks again should return false + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.False(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}}) + require.NoError(t, err) + assert.False(t, added) + + // Adding the remaining chunks in reverse should be fine + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}}) + require.NoError(t, err) + assert.True(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}}) + require.NoError(t, err) + assert.True(t, added) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) + require.NoError(t, err) + assert.True(t, added) + + // At this point, we should be able to retrieve them all via Next + for i := 0; i < 5; i++ { + c, err := queue.Next() + require.NoError(t, err) + assert.Equal(t, &chunk{Height: 3, Format: 1, Index: uint32(i), Chunk: []byte{3, 1, byte(i)}}, c) + } + _, err = queue.Next() + require.Error(t, err) + assert.Equal(t, errDone, err) + + // It should still be possible to try to add chunks (which will be ignored) + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + assert.False(t, added) + 
+	// After closing the queue it will also return false
+	err = queue.Close()
+	require.NoError(t, err)
+	added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}})
+	require.NoError(t, err)
+	assert.False(t, added)
+
+	// Closing the queue again should also be fine
+	err = queue.Close()
+	require.NoError(t, err)
+}
+
+func TestChunkQueue_Add_ChunkErrors(t *testing.T) {
+	testcases := map[string]struct {
+		chunk *chunk
+	}{
+		"nil chunk":     {nil},
+		"nil body":      {&chunk{Height: 3, Format: 1, Index: 0, Chunk: nil}},
+		"wrong height":  {&chunk{Height: 9, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}},
+		"wrong format":  {&chunk{Height: 3, Format: 9, Index: 0, Chunk: []byte{3, 1, 0}}},
+		"invalid index": {&chunk{Height: 3, Format: 1, Index: 5, Chunk: []byte{3, 1, 0}}},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			queue, teardown := setupChunkQueue(t)
+			defer teardown()
+			_, err := queue.Add(tc.chunk)
+			require.Error(t, err)
+		})
+	}
+}
+
+func TestChunkQueue_Allocate(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	for i := uint32(0); i < queue.Size(); i++ {
+		index, err := queue.Allocate()
+		require.NoError(t, err)
+		assert.EqualValues(t, i, index)
+	}
+
+	_, err := queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+
+	for i := uint32(0); i < queue.Size(); i++ {
+		_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
+		require.NoError(t, err)
+	}
+
+	// After all chunks have been allocated and added, discarding a chunk will reallocate it.
+	err = queue.Discard(2)
+	require.NoError(t, err)
+
+	index, err := queue.Allocate()
+	require.NoError(t, err)
+	assert.EqualValues(t, 2, index)
+	_, err = queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+
+	// Discarding a chunk then closing the queue will return errDone.
+	err = queue.Discard(2)
+	require.NoError(t, err)
+	err = queue.Close()
+	require.NoError(t, err)
+	_, err = queue.Allocate()
+	require.Error(t, err)
+	assert.Equal(t, errDone, err)
+}
+
+func TestChunkQueue_Discard(t *testing.T) {
+	queue, teardown := setupChunkQueue(t)
+	defer teardown()
+
+	// Add a few chunks to the queue and fetch a couple
+	_, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}})
+	require.NoError(t, err)
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{byte(1)}})
+	require.NoError(t, err)
+	_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{byte(2)}})
+	require.NoError(t, err)
+
+	c, err := queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, c.Index)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 1, c.Index)
+
+	// Discarding the first chunk and re-adding it should cause it to be returned
+	// immediately by Next(), before proceeding with chunk 2
+	err = queue.Discard(0)
+	require.NoError(t, err)
+	added, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{byte(0)}})
+	require.NoError(t, err)
+	assert.True(t, added)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 0, c.Index)
+	c, err = queue.Next()
+	require.NoError(t, err)
+	assert.EqualValues(t, 2, c.Index)
+
+	// Discard then allocate, add and fetch all chunks
+	for i := uint32(0); i < queue.Size(); i++ {
+		err := queue.Discard(i)
+		require.NoError(t, err)
+	}
+	for i := uint32(0); i < queue.Size(); i++ {
+		_, err := queue.Allocate()
+		require.NoError(t, err)
+		_, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}})
+		require.NoError(t, err)
+		c, err = queue.Next()
+		require.NoError(t, err)
+		assert.EqualValues(t, i, c.Index)
+	}
+
+	// Discarding a non-existent chunk does nothing.
+	err = queue.Discard(99)
+	require.NoError(t, err)
+
+	// When discarding a couple of chunks, we should be able to allocate, add, and fetch them again.
+ err = queue.Discard(3) + require.NoError(t, err) + err = queue.Discard(1) + require.NoError(t, err) + + index, err := queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 1, index) + index, err = queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 3, index) + + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3}}) + require.NoError(t, err) + assert.True(t, added) + added, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) + + chunk, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 1, chunk.Index) + + chunk, err = queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 3, chunk.Index) + + _, err = queue.Next() + require.Error(t, err) + assert.Equal(t, errDone, err) + + // After closing the queue, discarding does nothing + err = queue.Close() + require.NoError(t, err) + err = queue.Discard(2) + require.NoError(t, err) +} + +func TestChunkQueue_DiscardSender(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + senders := []p2p.ID{"a", "b", "c"} + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{ + Height: 3, + Format: 1, + Index: i, + Chunk: []byte{byte(i)}, + Sender: senders[int(i)%len(senders)], + }) + require.NoError(t, err) + } + + // Fetch the first three chunks + for i := uint32(0); i < 3; i++ { + _, err := queue.Next() + require.NoError(t, err) + } + + // Discarding an unknown sender should do nothing + err := queue.DiscardSender("x") + require.NoError(t, err) + _, err = queue.Allocate() + assert.Equal(t, errDone, err) + + // Discarding sender b should discard chunk 4, but not chunk 1 which has already been + // returned. + err = queue.DiscardSender("b") + require.NoError(t, err) + index, err := queue.Allocate() + require.NoError(t, err) + assert.EqualValues(t, 4, index) + _, err = queue.Allocate() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_GetSender(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{1}, Sender: p2p.ID("a")}) + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{2}, Sender: p2p.ID("b")}) + require.NoError(t, err) + + assert.EqualValues(t, "a", queue.GetSender(0)) + assert.EqualValues(t, "b", queue.GetSender(1)) + assert.EqualValues(t, "", queue.GetSender(2)) + + // After the chunk has been processed, we should still know who the sender was + chunk, err := queue.Next() + require.NoError(t, err) + require.NotNil(t, chunk) + require.EqualValues(t, 0, chunk.Index) + assert.EqualValues(t, "a", queue.GetSender(0)) +} + +func TestChunkQueue_Next(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Next should block waiting for the next chunks, even when given out of order. 
+ chNext := make(chan *chunk, 10) + go func() { + for { + c, err := queue.Next() + if err == errDone { + close(chNext) + break + } + require.NoError(t, err) + chNext <- c + } + }() + + assert.Empty(t, chNext) + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}) + require.NoError(t, err) + select { + case <-chNext: + assert.Fail(t, "channel should be empty") + default: + } + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}) + require.NoError(t, err) + + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}, Sender: p2p.ID("a")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}, Sender: p2p.ID("b")}, + <-chNext) + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}) + require.NoError(t, err) + select { + case <-chNext: + assert.Fail(t, "channel should be empty") + default: + } + + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}) + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}) + require.NoError(t, err) + + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}, Sender: p2p.ID("c")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 3, Chunk: []byte{3, 1, 3}, Sender: p2p.ID("d")}, + <-chNext) + assert.Equal(t, + &chunk{Height: 3, Format: 1, Index: 4, Chunk: []byte{3, 1, 4}, Sender: p2p.ID("e")}, + <-chNext) + + _, ok := <-chNext + assert.False(t, ok, "channel should be closed") + + // Calling next on a finished queue should return done + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_Next_Closed(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Calling Next on a closed queue should return done + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) + require.NoError(t, err) + err = queue.Close() + require.NoError(t, err) + + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_Retry(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = queue.Next() + require.NoError(t, err) + } + + // Retrying a couple of chunks makes Next() return them, but they are not allocatable + queue.Retry(3) + queue.Retry(1) + + _, err := queue.Allocate() + assert.Equal(t, errDone, err) + + chunk, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 1, chunk.Index) + + chunk, err = queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 3, chunk.Index) + + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_RetryAll(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + // Allocate and add all chunks to the queue + for i := uint32(0); i < queue.Size(); i++ { + _, err := queue.Allocate() + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: i, Chunk: []byte{byte(i)}}) + require.NoError(t, err) + _, err = queue.Next() + require.NoError(t, err) + } + + _, err := queue.Next() + assert.Equal(t, errDone, 
err) + + queue.RetryAll() + + _, err = queue.Allocate() + assert.Equal(t, errDone, err) + + for i := uint32(0); i < queue.Size(); i++ { + chunk, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, i, chunk.Index) + } + + _, err = queue.Next() + assert.Equal(t, errDone, err) +} + +func TestChunkQueue_Size(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + assert.EqualValues(t, 5, queue.Size()) + + err := queue.Close() + require.NoError(t, err) + assert.EqualValues(t, 0, queue.Size()) +} + +func TestChunkQueue_WaitFor(t *testing.T) { + queue, teardown := setupChunkQueue(t) + defer teardown() + + waitFor1 := queue.WaitFor(1) + waitFor4 := queue.WaitFor(4) + + // Adding 0 and 2 should not trigger waiters + _, err := queue.Add(&chunk{Height: 3, Format: 1, Index: 0, Chunk: []byte{3, 1, 0}}) + require.NoError(t, err) + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 2, Chunk: []byte{3, 1, 2}}) + require.NoError(t, err) + select { + case <-waitFor1: + require.Fail(t, "WaitFor(1) should not trigger on 0 or 2") + case <-waitFor4: + require.Fail(t, "WaitFor(4) should not trigger on 0 or 2") + default: + } + + // Adding 1 should trigger WaitFor(1), but not WaitFor(4). The channel should be closed. + _, err = queue.Add(&chunk{Height: 3, Format: 1, Index: 1, Chunk: []byte{3, 1, 1}}) + require.NoError(t, err) + assert.EqualValues(t, 1, <-waitFor1) + _, ok := <-waitFor1 + assert.False(t, ok) + select { + case <-waitFor4: + require.Fail(t, "WaitFor(4) should not trigger on 0 or 2") + default: + } + + // Fetch the first chunk. At this point, waiting for either 0 (retrieved from pool) or 1 + // (queued in pool) should immediately return true. + c, err := queue.Next() + require.NoError(t, err) + assert.EqualValues(t, 0, c.Index) + + w := queue.WaitFor(0) + assert.EqualValues(t, 0, <-w) + _, ok = <-w + assert.False(t, ok) + + w = queue.WaitFor(1) + assert.EqualValues(t, 1, <-w) + _, ok = <-w + assert.False(t, ok) + + // Close the queue. This should cause the waiter for 4 to close, and also cause any future + // waiters to get closed channels. + err = queue.Close() + require.NoError(t, err) + _, ok = <-waitFor4 + assert.False(t, ok) + + w = queue.WaitFor(3) + _, ok = <-w + assert.False(t, ok) +} diff --git a/statesync/messages.go b/statesync/messages.go new file mode 100644 index 000000000..83aecd7f1 --- /dev/null +++ b/statesync/messages.go @@ -0,0 +1,129 @@ +package statesync + +import ( + "errors" + "fmt" + + amino "github.com/tendermint/go-amino" + + "github.com/tendermint/tendermint/types" +) + +const ( + // snapshotMsgSize is the maximum size of a snapshotResponseMessage + snapshotMsgSize = int(4e6) + // chunkMsgSize is the maximum size of a chunkResponseMessage + chunkMsgSize = int(16e6) + // maxMsgSize is the maximum size of any message + maxMsgSize = chunkMsgSize +) + +var cdc = amino.NewCodec() + +func init() { + cdc.RegisterInterface((*Message)(nil), nil) + cdc.RegisterConcrete(&snapshotsRequestMessage{}, "tendermint/SnapshotsRequestMessage", nil) + cdc.RegisterConcrete(&snapshotsResponseMessage{}, "tendermint/SnapshotsResponseMessage", nil) + cdc.RegisterConcrete(&chunkRequestMessage{}, "tendermint/ChunkRequestMessage", nil) + cdc.RegisterConcrete(&chunkResponseMessage{}, "tendermint/ChunkResponseMessage", nil) + types.RegisterBlockAmino(cdc) +} + +// decodeMsg decodes a message. 
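+// Messages are amino-encoded using the concrete types registered in init() above,
+// so a round trip looks like (sketch):
+//
+//	bz := cdc.MustMarshalBinaryBare(&chunkRequestMessage{Height: 1, Format: 1, Index: 0})
+//	msg, err := decodeMsg(bz) // msg is a *chunkRequestMessage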
+func decodeMsg(bz []byte) (Message, error) {
+	if len(bz) > maxMsgSize {
+		return nil, fmt.Errorf("msg exceeds max size (%d > %d)", len(bz), maxMsgSize)
+	}
+	var msg Message
+	err := cdc.UnmarshalBinaryBare(bz, &msg)
+	if err != nil {
+		return nil, err
+	}
+	return msg, nil
+}
+
+// Message is a message sent and received by the reactor.
+type Message interface {
+	ValidateBasic() error
+}
+
+// snapshotsRequestMessage requests recent snapshots from a peer.
+type snapshotsRequestMessage struct{}
+
+// ValidateBasic implements Message.
+func (m *snapshotsRequestMessage) ValidateBasic() error {
+	if m == nil {
+		return errors.New("nil message")
+	}
+	return nil
+}
+
+// snapshotsResponseMessage contains information about a single snapshot.
+type snapshotsResponseMessage struct {
+	Height   uint64
+	Format   uint32
+	Chunks   uint32
+	Hash     []byte
+	Metadata []byte
+}
+
+// ValidateBasic implements Message.
+func (m *snapshotsResponseMessage) ValidateBasic() error {
+	if m == nil {
+		return errors.New("nil message")
+	}
+	if m.Height == 0 {
+		return errors.New("height cannot be 0")
+	}
+	if len(m.Hash) == 0 {
+		return errors.New("snapshot has no hash")
+	}
+	if m.Chunks == 0 {
+		return errors.New("snapshot has no chunks")
+	}
+	return nil
+}
+
+// chunkRequestMessage requests a single chunk from a peer.
+type chunkRequestMessage struct {
+	Height uint64
+	Format uint32
+	Index  uint32
+}
+
+// ValidateBasic implements Message.
+func (m *chunkRequestMessage) ValidateBasic() error {
+	if m == nil {
+		return errors.New("nil message")
+	}
+	if m.Height == 0 {
+		return errors.New("height cannot be 0")
+	}
+	return nil
+}
+
+// chunkResponseMessage contains a single chunk from a peer.
+type chunkResponseMessage struct {
+	Height  uint64
+	Format  uint32
+	Index   uint32
+	Chunk   []byte
+	Missing bool
+}
+
+// ValidateBasic implements Message.
+func (m *chunkResponseMessage) ValidateBasic() error { + if m == nil { + return errors.New("nil message") + } + if m.Height == 0 { + return errors.New("height cannot be 0") + } + if m.Missing && len(m.Chunk) > 0 { + return errors.New("missing chunk cannot have contents") + } + if !m.Missing && m.Chunk == nil { + return errors.New("chunk cannot be nil") + } + return nil +} diff --git a/statesync/messages_test.go b/statesync/messages_test.go new file mode 100644 index 000000000..68244a5da --- /dev/null +++ b/statesync/messages_test.go @@ -0,0 +1,106 @@ +package statesync + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestSnapshotsRequestMessage_ValidateBasic(t *testing.T) { + testcases := map[string]struct { + msg *snapshotsRequestMessage + valid bool + }{ + "nil": {nil, false}, + "valid": {&snapshotsRequestMessage{}, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + err := tc.msg.ValidateBasic() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestSnapshotsResponseMessage_ValidateBasic(t *testing.T) { + testcases := map[string]struct { + msg *snapshotsResponseMessage + valid bool + }{ + "nil": {nil, false}, + "valid": {&snapshotsResponseMessage{Height: 1, Format: 1, Chunks: 2, Hash: []byte{1}}, true}, + "0 height": {&snapshotsResponseMessage{Height: 0, Format: 1, Chunks: 2, Hash: []byte{1}}, false}, + "0 format": {&snapshotsResponseMessage{Height: 1, Format: 0, Chunks: 2, Hash: []byte{1}}, true}, + "0 chunks": {&snapshotsResponseMessage{Height: 1, Format: 1, Hash: []byte{1}}, false}, + "no hash": {&snapshotsResponseMessage{Height: 1, Format: 1, Chunks: 2, Hash: []byte{}}, false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + err := tc.msg.ValidateBasic() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestChunkRequestMessage_ValidateBasic(t *testing.T) { + testcases := map[string]struct { + msg *chunkRequestMessage + valid bool + }{ + "nil": {nil, false}, + "valid": {&chunkRequestMessage{Height: 1, Format: 1, Index: 1}, true}, + "0 height": {&chunkRequestMessage{Height: 0, Format: 1, Index: 1}, false}, + "0 format": {&chunkRequestMessage{Height: 1, Format: 0, Index: 1}, true}, + "0 chunk": {&chunkRequestMessage{Height: 1, Format: 1, Index: 0}, true}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + err := tc.msg.ValidateBasic() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} + +func TestChunkResponseMessage_ValidateBasic(t *testing.T) { + testcases := map[string]struct { + msg *chunkResponseMessage + valid bool + }{ + "nil message": {nil, false}, + "valid": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}}, true}, + "0 height": {&chunkResponseMessage{Height: 0, Format: 1, Index: 1, Chunk: []byte{1}}, false}, + "0 format": {&chunkResponseMessage{Height: 1, Format: 0, Index: 1, Chunk: []byte{1}}, true}, + "0 chunk": {&chunkResponseMessage{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}, true}, + "empty body": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Chunk: []byte{}}, true}, + "nil body": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Chunk: nil}, false}, + "missing": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Missing: true}, true}, + "missing with empty": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: 
[]byte{}}, true}, + "missing with body": {&chunkResponseMessage{Height: 1, Format: 1, Index: 1, Missing: true, Chunk: []byte{1}}, false}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + err := tc.msg.ValidateBasic() + if tc.valid { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/statesync/mocks/state_provider.go b/statesync/mocks/state_provider.go new file mode 100644 index 000000000..0a71aae68 --- /dev/null +++ b/statesync/mocks/state_provider.go @@ -0,0 +1,82 @@ +// Code generated by mockery v1.0.0. DO NOT EDIT. + +package mocks + +import ( + mock "github.com/stretchr/testify/mock" + state "github.com/tendermint/tendermint/state" + + types "github.com/tendermint/tendermint/types" +) + +// StateProvider is an autogenerated mock type for the StateProvider type +type StateProvider struct { + mock.Mock +} + +// AppHash provides a mock function with given fields: height +func (_m *StateProvider) AppHash(height uint64) ([]byte, error) { + ret := _m.Called(height) + + var r0 []byte + if rf, ok := ret.Get(0).(func(uint64) []byte); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]byte) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// Commit provides a mock function with given fields: height +func (_m *StateProvider) Commit(height uint64) (*types.Commit, error) { + ret := _m.Called(height) + + var r0 *types.Commit + if rf, ok := ret.Get(0).(func(uint64) *types.Commit); ok { + r0 = rf(height) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*types.Commit) + } + } + + var r1 error + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// State provides a mock function with given fields: height +func (_m *StateProvider) State(height uint64) (state.State, error) { + ret := _m.Called(height) + + var r0 state.State + if rf, ok := ret.Get(0).(func(uint64) state.State); ok { + r0 = rf(height) + } else { + r0 = ret.Get(0).(state.State) + } + + var r1 error + if rf, ok := ret.Get(1).(func(uint64) error); ok { + r1 = rf(height) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} diff --git a/statesync/reactor.go b/statesync/reactor.go new file mode 100644 index 000000000..b4221bff6 --- /dev/null +++ b/statesync/reactor.go @@ -0,0 +1,261 @@ +package statesync + +import ( + "errors" + "sort" + "sync" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +const ( + // SnapshotChannel exchanges snapshot metadata + SnapshotChannel = byte(0x60) + // ChunkChannel exchanges chunk contents + ChunkChannel = byte(0x61) + // recentSnapshots is the number of recent snapshots to send and receive per peer. + recentSnapshots = 10 +) + +// Reactor handles state sync, both restoring snapshots for the local node and serving snapshots +// for other nodes. +type Reactor struct { + p2p.BaseReactor + + conn proxy.AppConnSnapshot + connQuery proxy.AppConnQuery + tempDir string + + // This will only be set when a state sync is in progress. It is used to feed received + // snapshots and chunks into the sync. + mtx sync.RWMutex + syncer *syncer +} + +// NewReactor creates a new state sync reactor. 
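+// It is expected to be wired up with the snapshot and query connections from the
+// multiAppConn, roughly (node wiring is outside this diff; names illustrative):
+//
+//	reactor := statesync.NewReactor(proxyApp.Snapshot(), proxyApp.Query(), tempDir)
+//	sw.AddReactor("STATESYNC", reactor)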
+func NewReactor(conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, tempDir string) *Reactor {
+	r := &Reactor{
+		conn:      conn,
+		connQuery: connQuery,
+		tempDir:   tempDir, // used by the syncer for temporary chunk storage
+	}
+	r.BaseReactor = *p2p.NewBaseReactor("StateSync", r)
+	return r
+}
+
+// GetChannels implements p2p.Reactor.
+func (r *Reactor) GetChannels() []*p2p.ChannelDescriptor {
+	return []*p2p.ChannelDescriptor{
+		{
+			ID:                  SnapshotChannel,
+			Priority:            3,
+			SendQueueCapacity:   10,
+			RecvMessageCapacity: snapshotMsgSize,
+		},
+		{
+			ID:                  ChunkChannel,
+			Priority:            1,
+			SendQueueCapacity:   4,
+			RecvMessageCapacity: chunkMsgSize,
+		},
+	}
+}
+
+// OnStart implements p2p.Reactor.
+func (r *Reactor) OnStart() error {
+	return nil
+}
+
+// AddPeer implements p2p.Reactor.
+func (r *Reactor) AddPeer(peer p2p.Peer) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+	if r.syncer != nil {
+		r.syncer.AddPeer(peer)
+	}
+}
+
+// RemovePeer implements p2p.Reactor.
+func (r *Reactor) RemovePeer(peer p2p.Peer, reason interface{}) {
+	r.mtx.RLock()
+	defer r.mtx.RUnlock()
+	if r.syncer != nil {
+		r.syncer.RemovePeer(peer)
+	}
+}
+
+// Receive implements p2p.Reactor.
+func (r *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
+	if !r.IsRunning() {
+		return
+	}
+
+	msg, err := decodeMsg(msgBytes)
+	if err != nil {
+		r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
+		r.Switch.StopPeerForError(src, err)
+		return
+	}
+	err = msg.ValidateBasic()
+	if err != nil {
+		r.Logger.Error("Invalid message", "peer", src, "msg", msg, "err", err)
+		r.Switch.StopPeerForError(src, err)
+		return
+	}
+
+	switch chID {
+	case SnapshotChannel:
+		switch msg := msg.(type) {
+		case *snapshotsRequestMessage:
+			snapshots, err := r.recentSnapshots(recentSnapshots)
+			if err != nil {
+				r.Logger.Error("Failed to fetch snapshots", "err", err)
+				return
+			}
+			for _, snapshot := range snapshots {
+				r.Logger.Debug("Advertising snapshot", "height", snapshot.Height,
+					"format", snapshot.Format, "peer", src.ID())
+				src.Send(chID, cdc.MustMarshalBinaryBare(&snapshotsResponseMessage{
+					Height:   snapshot.Height,
+					Format:   snapshot.Format,
+					Chunks:   snapshot.Chunks,
+					Hash:     snapshot.Hash,
+					Metadata: snapshot.Metadata,
+				}))
+			}
+
+		case *snapshotsResponseMessage:
+			r.mtx.RLock()
+			defer r.mtx.RUnlock()
+			if r.syncer == nil {
+				r.Logger.Debug("Received unexpected snapshot, no state sync in progress")
+				return
+			}
+			r.Logger.Debug("Received snapshot", "height", msg.Height, "format", msg.Format, "peer", src.ID())
+			_, err := r.syncer.AddSnapshot(src, &snapshot{
+				Height:   msg.Height,
+				Format:   msg.Format,
+				Chunks:   msg.Chunks,
+				Hash:     msg.Hash,
+				Metadata: msg.Metadata,
+			})
+			if err != nil {
+				r.Logger.Error("Failed to add snapshot", "height", msg.Height, "format", msg.Format,
+					"peer", src.ID(), "err", err)
+				return
+			}
+
+		default:
+			r.Logger.Error("Received unknown message", "msg", msg)
+		}
+
+	case ChunkChannel:
+		switch msg := msg.(type) {
+		case *chunkRequestMessage:
+			r.Logger.Debug("Received chunk request", "height", msg.Height, "format", msg.Format,
+				"chunk", msg.Index, "peer", src.ID())
+			resp, err := r.conn.LoadSnapshotChunkSync(abci.RequestLoadSnapshotChunk{
+				Height: msg.Height,
+				Format: msg.Format,
+				Chunk:  msg.Index,
+			})
+			if err != nil {
+				r.Logger.Error("Failed to load chunk", "height", msg.Height, "format", msg.Format,
+					"chunk", msg.Index, "err", err)
+				return
+			}
+			r.Logger.Debug("Sending chunk", "height", msg.Height, "format", msg.Format,
+				"chunk", msg.Index, "peer", src.ID())
+			src.Send(ChunkChannel, 
cdc.MustMarshalBinaryBare(&chunkResponseMessage{
+				Height:  msg.Height,
+				Format:  msg.Format,
+				Index:   msg.Index,
+				Chunk:   resp.Chunk,
+				Missing: resp.Chunk == nil,
+			}))
+
+		case *chunkResponseMessage:
+			r.mtx.RLock()
+			defer r.mtx.RUnlock()
+			if r.syncer == nil {
+				r.Logger.Debug("Received unexpected chunk, no state sync in progress", "peer", src.ID())
+				return
+			}
+			r.Logger.Debug("Received chunk, adding to sync", "height", msg.Height, "format", msg.Format,
+				"chunk", msg.Index, "peer", src.ID())
+			_, err := r.syncer.AddChunk(&chunk{
+				Height: msg.Height,
+				Format: msg.Format,
+				Index:  msg.Index,
+				Chunk:  msg.Chunk,
+				Sender: src.ID(),
+			})
+			if err != nil {
+				r.Logger.Error("Failed to add chunk", "height", msg.Height, "format", msg.Format,
+					"chunk", msg.Index, "err", err)
+				return
+			}
+
+		default:
+			r.Logger.Error("Received unknown message", "msg", msg)
+		}
+
+	default:
+		r.Logger.Error("Received message on invalid channel", "channel", chID)
+	}
+}
+
+// recentSnapshots fetches the n most recent snapshots from the app
+func (r *Reactor) recentSnapshots(n uint32) ([]*snapshot, error) {
+	resp, err := r.conn.ListSnapshotsSync(abci.RequestListSnapshots{})
+	if err != nil {
+		return nil, err
+	}
+	sort.Slice(resp.Snapshots, func(i, j int) bool {
+		a := resp.Snapshots[i]
+		b := resp.Snapshots[j]
+		switch {
+		case a.Height > b.Height:
+			return true
+		case a.Height == b.Height && a.Format > b.Format:
+			return true
+		default:
+			return false
+		}
+	})
+	snapshots := make([]*snapshot, 0, n)
+	for i, s := range resp.Snapshots {
+		if uint32(i) >= n {
+			break
+		}
+		snapshots = append(snapshots, &snapshot{
+			Height:   s.Height,
+			Format:   s.Format,
+			Chunks:   s.Chunks,
+			Hash:     s.Hash,
+			Metadata: s.Metadata,
+		})
+	}
+	return snapshots, nil
+}
+
+// Sync runs a state sync, returning the new state and last commit at the snapshot height.
+// The caller must store the state and commit in the state database and block store.
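+// A hypothetical caller, using BootstrapState from state/store.go above (sketch):
+//
+//	state, commit, err := reactor.Sync(stateProvider)
+//	if err != nil {
+//		return err
+//	}
+//	if err := sm.BootstrapState(stateDB, state); err != nil {
+//		return err
+//	}
+//	// ...then persist `commit` in the block store before starting fast sync.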
+func (r *Reactor) Sync(stateProvider StateProvider) (sm.State, *types.Commit, error) { + r.mtx.Lock() + if r.syncer != nil { + r.mtx.Unlock() + return sm.State{}, nil, errors.New("a state sync is already in progress") + } + r.syncer = newSyncer(r.Logger, r.conn, r.connQuery, stateProvider, r.tempDir) + r.mtx.Unlock() + + state, commit, err := r.syncer.SyncAny(defaultDiscoveryTime) + r.mtx.Lock() + r.syncer = nil + r.mtx.Unlock() + return state, commit, err +} diff --git a/statesync/reactor_test.go b/statesync/reactor_test.go new file mode 100644 index 000000000..402eba6d7 --- /dev/null +++ b/statesync/reactor_test.go @@ -0,0 +1,148 @@ +package statesync + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/p2p" + p2pmocks "github.com/tendermint/tendermint/p2p/mocks" + proxymocks "github.com/tendermint/tendermint/proxy/mocks" +) + +func TestReactor_Receive_ChunkRequestMessage(t *testing.T) { + testcases := map[string]struct { + request *chunkRequestMessage + chunk []byte + expectResponse *chunkResponseMessage + }{ + "chunk is returned": { + &chunkRequestMessage{Height: 1, Format: 1, Index: 1}, + []byte{1, 2, 3}, + &chunkResponseMessage{Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 2, 3}}}, + "empty chunk is returned, as nil": { + &chunkRequestMessage{Height: 1, Format: 1, Index: 1}, + []byte{}, + &chunkResponseMessage{Height: 1, Format: 1, Index: 1, Chunk: nil}}, + "nil (missing) chunk is returned as missing": { + &chunkRequestMessage{Height: 1, Format: 1, Index: 1}, + nil, + &chunkResponseMessage{Height: 1, Format: 1, Index: 1, Missing: true}, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Mock ABCI connection to return local snapshots + conn := &proxymocks.AppConnSnapshot{} + conn.On("LoadSnapshotChunkSync", abci.RequestLoadSnapshotChunk{ + Height: tc.request.Height, + Format: tc.request.Format, + Chunk: tc.request.Index, + }).Return(&abci.ResponseLoadSnapshotChunk{Chunk: tc.chunk}, nil) + + // Mock peer to store response, if found + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + var response *chunkResponseMessage + if tc.expectResponse != nil { + peer.On("Send", ChunkChannel, mock.Anything).Run(func(args mock.Arguments) { + msg, err := decodeMsg(args[1].([]byte)) + require.NoError(t, err) + response = msg.(*chunkResponseMessage) + }).Return(true) + } + + // Start a reactor and send a chunkRequestMessage, then wait for and check response + r := NewReactor(conn, nil, "") + err := r.Start() + require.NoError(t, err) + defer r.Stop() + + r.Receive(ChunkChannel, peer, cdc.MustMarshalBinaryBare(tc.request)) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, tc.expectResponse, response) + + conn.AssertExpectations(t) + peer.AssertExpectations(t) + }) + } +} + +func TestReactor_Receive_SnapshotRequestMessage(t *testing.T) { + testcases := map[string]struct { + snapshots []*abci.Snapshot + expectResponses []*snapshotsResponseMessage + }{ + "no snapshots": {nil, []*snapshotsResponseMessage{}}, + ">10 unordered snapshots": { + []*abci.Snapshot{ + {Height: 1, Format: 2, Chunks: 7, Hash: []byte{1, 2}, Metadata: []byte{1}}, + {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 1, Format: 1, Chunks: 7, Hash: []byte{1, 1}, Metadata: 
[]byte{4}}, + {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}}, + {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, + }, + []*snapshotsResponseMessage{ + {Height: 3, Format: 4, Chunks: 7, Hash: []byte{3, 4}, Metadata: []byte{9}}, + {Height: 3, Format: 3, Chunks: 7, Hash: []byte{3, 3}, Metadata: []byte{12}}, + {Height: 3, Format: 2, Chunks: 7, Hash: []byte{3, 2}, Metadata: []byte{3}}, + {Height: 3, Format: 1, Chunks: 7, Hash: []byte{3, 1}, Metadata: []byte{6}}, + {Height: 2, Format: 4, Chunks: 7, Hash: []byte{2, 4}, Metadata: []byte{8}}, + {Height: 2, Format: 3, Chunks: 7, Hash: []byte{2, 3}, Metadata: []byte{11}}, + {Height: 2, Format: 2, Chunks: 7, Hash: []byte{2, 2}, Metadata: []byte{2}}, + {Height: 2, Format: 1, Chunks: 7, Hash: []byte{2, 1}, Metadata: []byte{5}}, + {Height: 1, Format: 4, Chunks: 7, Hash: []byte{1, 4}, Metadata: []byte{7}}, + {Height: 1, Format: 3, Chunks: 7, Hash: []byte{1, 3}, Metadata: []byte{10}}, + }, + }, + } + + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + // Mock ABCI connection to return local snapshots + conn := &proxymocks.AppConnSnapshot{} + conn.On("ListSnapshotsSync", abci.RequestListSnapshots{}).Return(&abci.ResponseListSnapshots{ + Snapshots: tc.snapshots, + }, nil) + + // Mock peer to catch responses and store them in a slice + responses := []*snapshotsResponseMessage{} + peer := &p2pmocks.Peer{} + if len(tc.expectResponses) > 0 { + peer.On("ID").Return(p2p.ID("id")) + peer.On("Send", SnapshotChannel, mock.Anything).Run(func(args mock.Arguments) { + msg, err := decodeMsg(args[1].([]byte)) + require.NoError(t, err) + responses = append(responses, msg.(*snapshotsResponseMessage)) + }).Return(true) + } + + // Start a reactor and send a SnapshotsRequestMessage, then wait for and check responses + r := NewReactor(conn, nil, "") + err := r.Start() + require.NoError(t, err) + defer r.Stop() + + r.Receive(SnapshotChannel, peer, cdc.MustMarshalBinaryBare(&snapshotsRequestMessage{})) + time.Sleep(100 * time.Millisecond) + assert.Equal(t, tc.expectResponses, responses) + + conn.AssertExpectations(t) + peer.AssertExpectations(t) + }) + } +} diff --git a/statesync/snapshots.go b/statesync/snapshots.go new file mode 100644 index 000000000..a2afb4083 --- /dev/null +++ b/statesync/snapshots.go @@ -0,0 +1,263 @@ +package statesync + +import ( + "crypto/sha256" + "fmt" + "math/rand" + "sort" + "sync" + + "github.com/tendermint/tendermint/p2p" +) + +// snapshotKey is a snapshot key used for lookups. +type snapshotKey [sha256.Size]byte + +// snapshot contains data about a snapshot. +type snapshot struct { + Height uint64 + Format uint32 + Chunks uint32 + Hash []byte + Metadata []byte + + trustedAppHash []byte // populated by light client +} + +// Key generates a snapshot key, used for lookups. It takes into account not only the height and +// format, but also the chunks, hash, and metadata in case peers have generated snapshots in a +// non-deterministic manner. 
All fields must be equal for the snapshot to be considered the same. +func (s *snapshot) Key() snapshotKey { + // Hash.Write() never returns an error. + hasher := sha256.New() + hasher.Write([]byte(fmt.Sprintf("%v:%v:%v", s.Height, s.Format, s.Chunks))) + hasher.Write(s.Hash) + hasher.Write(s.Metadata) + var key snapshotKey + copy(key[:], hasher.Sum(nil)) + return key +} + +// snapshotPool discovers and aggregates snapshots across peers. +type snapshotPool struct { + stateProvider StateProvider + + sync.Mutex + snapshots map[snapshotKey]*snapshot + snapshotPeers map[snapshotKey]map[p2p.ID]p2p.Peer + + // indexes for fast searches + formatIndex map[uint32]map[snapshotKey]bool + heightIndex map[uint64]map[snapshotKey]bool + peerIndex map[p2p.ID]map[snapshotKey]bool + + // blacklists for rejected items + formatBlacklist map[uint32]bool + peerBlacklist map[p2p.ID]bool + snapshotBlacklist map[snapshotKey]bool +} + +// newSnapshotPool creates a new snapshot pool. The state provider is used to verify snapshot + heights and to fetch the trusted app hash for each snapshot. +func newSnapshotPool(stateProvider StateProvider) *snapshotPool { + return &snapshotPool{ + stateProvider: stateProvider, + snapshots: make(map[snapshotKey]*snapshot), + snapshotPeers: make(map[snapshotKey]map[p2p.ID]p2p.Peer), + formatIndex: make(map[uint32]map[snapshotKey]bool), + heightIndex: make(map[uint64]map[snapshotKey]bool), + peerIndex: make(map[p2p.ID]map[snapshotKey]bool), + formatBlacklist: make(map[uint32]bool), + peerBlacklist: make(map[p2p.ID]bool), + snapshotBlacklist: make(map[snapshotKey]bool), + } +} + +// Add adds a snapshot to the pool, unless the peer has already sent recentSnapshots snapshots. It + returns true if this was a new, non-blacklisted snapshot. The snapshot height is verified using + the light client, and the expected app hash is set for the snapshot. +func (p *snapshotPool) Add(peer p2p.Peer, snapshot *snapshot) (bool, error) { + appHash, err := p.stateProvider.AppHash(snapshot.Height) + if err != nil { + return false, err + } + snapshot.trustedAppHash = appHash + key := snapshot.Key() + + p.Lock() + defer p.Unlock() + + switch { + case p.formatBlacklist[snapshot.Format]: + return false, nil + case p.peerBlacklist[peer.ID()]: + return false, nil + case p.snapshotBlacklist[key]: + return false, nil + case len(p.peerIndex[peer.ID()]) >= recentSnapshots: + return false, nil + } + + if p.snapshotPeers[key] == nil { + p.snapshotPeers[key] = make(map[p2p.ID]p2p.Peer) + } + p.snapshotPeers[key][peer.ID()] = peer + + if p.peerIndex[peer.ID()] == nil { + p.peerIndex[peer.ID()] = make(map[snapshotKey]bool) + } + p.peerIndex[peer.ID()][key] = true + + if p.snapshots[key] != nil { + return false, nil + } + p.snapshots[key] = snapshot + + if p.formatIndex[snapshot.Format] == nil { + p.formatIndex[snapshot.Format] = make(map[snapshotKey]bool) + } + p.formatIndex[snapshot.Format][key] = true + + if p.heightIndex[snapshot.Height] == nil { + p.heightIndex[snapshot.Height] = make(map[snapshotKey]bool) + } + p.heightIndex[snapshot.Height][key] = true + + return true, nil +} + +// Best returns the "best" currently known snapshot, if any. +func (p *snapshotPool) Best() *snapshot { + ranked := p.Ranked() + if len(ranked) == 0 { + return nil + } + return ranked[0] +} + +// GetPeer returns a random peer for a snapshot, if any. +func (p *snapshotPool) GetPeer(snapshot *snapshot) p2p.Peer { + peers := p.GetPeers(snapshot) + if len(peers) == 0 { + return nil + } + return peers[rand.Intn(len(peers))] +} + +// GetPeers returns the peers for a snapshot.
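+// Results are sorted by peer ID, so callers see a deterministic order.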
+func (p *snapshotPool) GetPeers(snapshot *snapshot) []p2p.Peer { + key := snapshot.Key() + p.Lock() + defer p.Unlock() + + peers := make([]p2p.Peer, 0, len(p.snapshotPeers[key])) + for _, peer := range p.snapshotPeers[key] { + peers = append(peers, peer) + } + // sort results, for testability (otherwise order is random, so tests randomly fail) + sort.Slice(peers, func(a int, b int) bool { + return peers[a].ID() < peers[b].ID() + }) + return peers +} + +// Ranked returns a list of snapshots ranked by preference. The current heuristic is very naïve, +// preferring the snapshot with the greatest height, then greatest format, then greatest number of +// peers. This can be improved quite a lot. +func (p *snapshotPool) Ranked() []*snapshot { + p.Lock() + defer p.Unlock() + + candidates := make([]*snapshot, 0, len(p.snapshots)) + for _, snapshot := range p.snapshots { + candidates = append(candidates, snapshot) + } + + sort.Slice(candidates, func(i, j int) bool { + a := candidates[i] + b := candidates[j] + + switch { + case a.Height > b.Height: + return true + case a.Height < b.Height: + return false + case a.Format > b.Format: + return true + case a.Format < b.Format: + return false + case len(p.snapshotPeers[a.Key()]) > len(p.snapshotPeers[b.Key()]): + return true + default: + return false + } + }) + + return candidates +} + +// Reject rejects a snapshot. Rejected snapshots will never be used again. +func (p *snapshotPool) Reject(snapshot *snapshot) { + key := snapshot.Key() + p.Lock() + defer p.Unlock() + + p.snapshotBlacklist[key] = true + p.removeSnapshot(key) +} + +// RejectFormat rejects a snapshot format. It will never be used again. +func (p *snapshotPool) RejectFormat(format uint32) { + p.Lock() + defer p.Unlock() + + p.formatBlacklist[format] = true + for key := range p.formatIndex[format] { + p.removeSnapshot(key) + } +} + +// RejectPeer rejects a peer. It will never be used again. +func (p *snapshotPool) RejectPeer(peerID p2p.ID) { + if peerID == "" { + return + } + p.Lock() + defer p.Unlock() + + p.removePeer(peerID) + p.peerBlacklist[peerID] = true +} + +// RemovePeer removes a peer from the pool, and any snapshots that no longer have peers. +func (p *snapshotPool) RemovePeer(peerID p2p.ID) { + p.Lock() + defer p.Unlock() + p.removePeer(peerID) +} + +// removePeer removes a peer. The caller must hold the mutex lock. +func (p *snapshotPool) removePeer(peerID p2p.ID) { + for key := range p.peerIndex[peerID] { + delete(p.snapshotPeers[key], peerID) + if len(p.snapshotPeers[key]) == 0 { + p.removeSnapshot(key) + } + } + delete(p.peerIndex, peerID) +} + +// removeSnapshot removes a snapshot. The caller must hold the mutex lock. 
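+// It also prunes the snapshot from the format, height, and peer indexes, and drops its peer set.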
+func (p *snapshotPool) removeSnapshot(key snapshotKey) { + snapshot := p.snapshots[key] + if snapshot == nil { + return + } + + delete(p.snapshots, key) + delete(p.formatIndex[snapshot.Format], key) + delete(p.heightIndex[snapshot.Height], key) + for peerID := range p.snapshotPeers[key] { + delete(p.peerIndex[peerID], key) + } + delete(p.snapshotPeers, key) +} diff --git a/statesync/snapshots_test.go b/statesync/snapshots_test.go new file mode 100644 index 000000000..e512da0b9 --- /dev/null +++ b/statesync/snapshots_test.go @@ -0,0 +1,326 @@ +package statesync + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/p2p" + p2pmocks "github.com/tendermint/tendermint/p2p/mocks" + "github.com/tendermint/tendermint/statesync/mocks" +) + +func TestSnapshot_Key(t *testing.T) { + testcases := map[string]struct { + modify func(*snapshot) + }{ + "new height": {func(s *snapshot) { s.Height = 9 }}, + "new format": {func(s *snapshot) { s.Format = 9 }}, + "new chunk count": {func(s *snapshot) { s.Chunks = 9 }}, + "new hash": {func(s *snapshot) { s.Hash = []byte{9} }}, + "no metadata": {func(s *snapshot) { s.Metadata = nil }}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + s := snapshot{ + Height: 3, + Format: 1, + Chunks: 7, + Hash: []byte{1, 2, 3}, + Metadata: []byte{255}, + } + before := s.Key() + tc.modify(&s) + after := s.Key() + assert.NotEqual(t, before, after) + }) + } +} + +func TestSnapshotPool_Add(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", uint64(1)).Return([]byte("app_hash"), nil) + + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + + // Adding to the pool should work + pool := newSnapshotPool(stateProvider) + added, err := pool.Add(peer, &snapshot{ + Height: 1, + Format: 1, + Chunks: 1, + Hash: []byte{1}, + }) + require.NoError(t, err) + assert.True(t, added) + + // Adding again from a different peer should return false + otherPeer := &p2pmocks.Peer{} + otherPeer.On("ID").Return(p2p.ID("other")) + added, err = pool.Add(otherPeer, &snapshot{ + Height: 1, + Format: 1, + Chunks: 1, + Hash: []byte{1}, + }) + require.NoError(t, err) + assert.False(t, added) + + // The pool should have populated the snapshot with the trusted app hash + snapshot := pool.Best() + require.NotNil(t, snapshot) + assert.Equal(t, []byte("app_hash"), snapshot.trustedAppHash) + + stateProvider.AssertExpectations(t) +} + +func TestSnapshotPool_GetPeer(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + _, err := pool.Add(peerA, s) + require.NoError(t, err) + _, err = pool.Add(peerB, s) + require.NoError(t, err) + _, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + + // GetPeer currently picks a random peer, so let's run it until we've seen both.
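+	// With two peers, each iteration is a fair coin flip, so the expected number of
+	// iterations is small and the loop terminates quickly with overwhelming probability.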
+ seenA := false + seenB := false + for !seenA || !seenB { + peer := pool.GetPeer(s) + switch peer.ID() { + case p2p.ID("a"): + seenA = true + case p2p.ID("b"): + seenB = true + } + } + + // GetPeer should return nil for an unknown snapshot + peer := pool.GetPeer(&snapshot{Height: 9, Format: 9}) + assert.Nil(t, peer) +} + +func TestSnapshotPool_GetPeers(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + s := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + _, err := pool.Add(peerA, s) + require.NoError(t, err) + _, err = pool.Add(peerB, s) + require.NoError(t, err) + _, err = pool.Add(peerA, &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}}) + require.NoError(t, err) + + peers := pool.GetPeers(s) + assert.Len(t, peers, 2) + assert.EqualValues(t, "a", peers[0].ID()) + assert.EqualValues(t, "b", peers[1].ID()) +} + +func TestSnapshotPool_Ranked_Best(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + // snapshots in expected order (best to worst). Highest height wins, then highest format. + Snapshots with different chunk hashes are considered different, and the number of peers is + the tie-breaker. + expectSnapshots := []struct { + snapshot *snapshot + peers []string + }{ + {&snapshot{Height: 2, Format: 2, Chunks: 4, Hash: []byte{1, 3}}, []string{"a", "b", "c"}}, + {&snapshot{Height: 2, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a"}}, + {&snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2}}, []string{"a", "b"}}, + {&snapshot{Height: 1, Format: 2, Chunks: 5, Hash: []byte{1, 2}}, []string{"a", "b"}}, + {&snapshot{Height: 1, Format: 1, Chunks: 4, Hash: []byte{1, 2}}, []string{"a", "b", "c"}}, + } + + // Add snapshots in reverse order, to make sure the pool enforces some order.
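+	// If insertion order leaked into the ranking, this reversed input would expose it.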
+ for i := len(expectSnapshots) - 1; i >= 0; i-- { + for _, peerID := range expectSnapshots[i].peers { + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID(peerID)) + _, err := pool.Add(peer, expectSnapshots[i].snapshot) + require.NoError(t, err) + } + } + + // Ranked should return the snapshots in the same order + ranked := pool.Ranked() + assert.Len(t, ranked, len(expectSnapshots)) + for i := range ranked { + assert.Equal(t, expectSnapshots[i].snapshot, ranked[i]) + } + + // Check that best snapshots are returned in expected order + for i := range expectSnapshots { + snapshot := expectSnapshots[i].snapshot + require.Equal(t, snapshot, pool.Best()) + pool.Reject(snapshot) + } + assert.Nil(t, pool.Best()) +} + +func TestSnapshotPool_Reject(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + + snapshots := []*snapshot{ + {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + } + for _, s := range snapshots { + _, err := pool.Add(peer, s) + require.NoError(t, err) + } + + pool.Reject(snapshots[0]) + assert.Equal(t, snapshots[1:], pool.Ranked()) + + added, err := pool.Add(peer, snapshots[0]) + require.NoError(t, err) + assert.False(t, added) + + added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) +} + +// nolint: dupl +func TestSnapshotPool_RejectFormat(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID("id")) + + snapshots := []*snapshot{ + {Height: 2, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 2, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 2, Chunks: 1, Hash: []byte{1, 2}}, + {Height: 1, Format: 1, Chunks: 1, Hash: []byte{1, 2}}, + } + for _, s := range snapshots { + _, err := pool.Add(peer, s) + require.NoError(t, err) + } + + pool.RejectFormat(1) + assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + + added, err := pool.Add(peer, &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.False(t, added) + assert.Equal(t, []*snapshot{snapshots[0], snapshots[2]}, pool.Ranked()) + + added, err = pool.Add(peer, &snapshot{Height: 3, Format: 3, Chunks: 1, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, added) +} + +func TestSnapshotPool_RejectPeer(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} + s3 := &snapshot{Height: 3, Format: 1, Chunks: 1, Hash: []byte{2}} + + _, err := pool.Add(peerA, s1) + require.NoError(t, err) + _, err = pool.Add(peerA, s2) + require.NoError(t, err) + + _, err = pool.Add(peerB, s2) + require.NoError(t, err) + _, err = pool.Add(peerB, s3) + 
require.NoError(t, err) + + pool.RejectPeer(peerA.ID()) + + assert.Empty(t, pool.GetPeers(s1)) + + peers2 := pool.GetPeers(s2) + assert.Len(t, peers2, 1) + assert.EqualValues(t, "b", peers2[0].ID()) + + peers3 := pool.GetPeers(s3) + assert.Len(t, peers3, 1) + assert.EqualValues(t, "b", peers3[0].ID()) + + // it should no longer be possible to add the peer back + _, err = pool.Add(peerA, s1) + require.NoError(t, err) + assert.Empty(t, pool.GetPeers(s1)) +} + +func TestSnapshotPool_RemovePeer(t *testing.T) { + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + pool := newSnapshotPool(stateProvider) + + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + + s1 := &snapshot{Height: 1, Format: 1, Chunks: 1, Hash: []byte{1}} + s2 := &snapshot{Height: 2, Format: 1, Chunks: 1, Hash: []byte{2}} + + _, err := pool.Add(peerA, s1) + require.NoError(t, err) + _, err = pool.Add(peerA, s2) + require.NoError(t, err) + _, err = pool.Add(peerB, s1) + require.NoError(t, err) + + pool.RemovePeer(peerA.ID()) + + peers1 := pool.GetPeers(s1) + assert.Len(t, peers1, 1) + assert.EqualValues(t, "b", peers1[0].ID()) + + peers2 := pool.GetPeers(s2) + assert.Empty(t, peers2) + + // it should still be possible to add the peer back + _, err = pool.Add(peerA, s1) + require.NoError(t, err) + peers1 = pool.GetPeers(s1) + assert.Len(t, peers1, 2) + assert.EqualValues(t, "a", peers1[0].ID()) + assert.EqualValues(t, "b", peers1[1].ID()) +} diff --git a/statesync/stateprovider.go b/statesync/stateprovider.go new file mode 100644 index 000000000..2726de853 --- /dev/null +++ b/statesync/stateprovider.go @@ -0,0 +1,179 @@ +package statesync + +import ( + "fmt" + "strings" + "sync" + "time" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" + lite "github.com/tendermint/tendermint/lite2" + liteprovider "github.com/tendermint/tendermint/lite2/provider" + litehttp "github.com/tendermint/tendermint/lite2/provider/http" + literpc "github.com/tendermint/tendermint/lite2/rpc" + litedb "github.com/tendermint/tendermint/lite2/store/db" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" +) + +//go:generate mockery -case underscore -name StateProvider + +// StateProvider is a provider of trusted state data for bootstrapping a node. This refers + to the state.State object, not the state machine. +type StateProvider interface { + // AppHash returns the app hash after the given height has been committed. + AppHash(height uint64) ([]byte, error) + // Commit returns the commit at the given height. + Commit(height uint64) (*types.Commit, error) + // State returns a state object at the given height. + State(height uint64) (sm.State, error) +} + +// lightClientStateProvider is a state provider using the light client. +type lightClientStateProvider struct { + sync.Mutex // lite.Client is not concurrency-safe + lc *lite.Client + version sm.Version + providers map[liteprovider.Provider]string +} + +// NewLightClientStateProvider creates a new StateProvider using a light client and RPC clients.
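+// An illustrative call, with placeholder endpoints and a trust root obtained out of
+// band (for example from another operator); not part of this changeset:
+//
+//	provider, err := NewLightClientStateProvider(
+//		"my-chain", version, []string{"host1:26657", "host2:26657"},
+//		lite.TrustOptions{Period: 168 * time.Hour, Height: 100, Hash: trustedHash},
+//		logger)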
+func NewLightClientStateProvider( + chainID string, + version sm.Version, + servers []string, + trustOptions lite.TrustOptions, + logger log.Logger, +) (StateProvider, error) { + if len(servers) < 2 { + return nil, fmt.Errorf("at least 2 RPC servers are required, got %v", len(servers)) + } + + providers := make([]liteprovider.Provider, 0, len(servers)) + providerRemotes := make(map[liteprovider.Provider]string) + for _, server := range servers { + client, err := rpcClient(server) + if err != nil { + return nil, fmt.Errorf("failed to set up RPC client: %w", err) + } + provider := litehttp.NewWithClient(chainID, client) + providers = append(providers, provider) + // We store the RPC addresses keyed by provider, so we can find the address of the primary + // provider used by the light client and use it to fetch consensus parameters. + providerRemotes[provider] = server + } + + lc, err := lite.NewClient(chainID, trustOptions, providers[0], providers[1:], + litedb.New(dbm.NewMemDB(), ""), lite.Logger(logger), lite.MaxRetryAttempts(5)) + if err != nil { + return nil, err + } + return &lightClientStateProvider{ + lc: lc, + version: version, + providers: providerRemotes, + }, nil +} + +// AppHash implements StateProvider. +func (s *lightClientStateProvider) AppHash(height uint64) ([]byte, error) { + s.Lock() + defer s.Unlock() + + // We have to fetch the next height, which contains the app hash for the previous height. + header, err := s.lc.VerifyHeaderAtHeight(int64(height+1), time.Now()) + if err != nil { + return nil, err + } + return header.AppHash, nil +} + +// Commit implements StateProvider. +func (s *lightClientStateProvider) Commit(height uint64) (*types.Commit, error) { + s.Lock() + defer s.Unlock() + header, err := s.lc.VerifyHeaderAtHeight(int64(height), time.Now()) + if err != nil { + return nil, err + } + return header.Commit, nil +} + +// State implements StateProvider. +func (s *lightClientStateProvider) State(height uint64) (sm.State, error) { + s.Lock() + defer s.Unlock() + + state := sm.State{ + ChainID: s.lc.ChainID(), + Version: s.version, + } + + // We need to verify up until h+2, to get the validator set. This also prefetches the headers + // for h and h+1 in the typical case where the trusted header is after the snapshot height. + _, err := s.lc.VerifyHeaderAtHeight(int64(height+2), time.Now()) + if err != nil { + return sm.State{}, err + } + header, err := s.lc.VerifyHeaderAtHeight(int64(height), time.Now()) + if err != nil { + return sm.State{}, err + } + nextHeader, err := s.lc.VerifyHeaderAtHeight(int64(height+1), time.Now()) + if err != nil { + return sm.State{}, err + } + state.LastBlockHeight = header.Height + state.LastBlockTime = header.Time + state.LastBlockID = header.Commit.BlockID + state.AppHash = nextHeader.AppHash + state.LastResultsHash = nextHeader.LastResultsHash + + state.LastValidators, _, err = s.lc.TrustedValidatorSet(int64(height)) + if err != nil { + return sm.State{}, err + } + state.Validators, _, err = s.lc.TrustedValidatorSet(int64(height + 1)) + if err != nil { + return sm.State{}, err + } + state.NextValidators, _, err = s.lc.TrustedValidatorSet(int64(height + 2)) + if err != nil { + return sm.State{}, err + } + state.LastHeightValidatorsChanged = int64(height) + + // We'll also need to fetch consensus params via RPC, using light client verification. 
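+	// Routing the query through the light client's RPC wrapper lets the response be
+	// checked against verified headers, so the parameters carry the same trust as the
+	// rest of the state rather than being taken on faith.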
+ primaryURL, ok := s.providers[s.lc.Primary()] + if !ok || primaryURL == "" { + return sm.State{}, fmt.Errorf("could not find address for primary light client provider") + } + primaryRPC, err := rpcClient(primaryURL) + if err != nil { + return sm.State{}, fmt.Errorf("unable to create RPC client: %w", err) + } + rpcclient := literpc.NewClient(primaryRPC, s.lc) + result, err := rpcclient.ConsensusParams(&nextHeader.Height) + if err != nil { + return sm.State{}, fmt.Errorf("unable to fetch consensus parameters for height %v: %w", + nextHeader.Height, err) + } + state.ConsensusParams = result.ConsensusParams + + return state, nil +} + +// rpcClient sets up a new RPC client. +func rpcClient(server string) (*rpchttp.HTTP, error) { + if !strings.Contains(server, "://") { + server = "http://" + server + } + c, err := rpchttp.New(server, "/websocket") + if err != nil { + return nil, err + } + return c, nil +} diff --git a/statesync/syncer.go b/statesync/syncer.go new file mode 100644 index 000000000..b53f452c0 --- /dev/null +++ b/statesync/syncer.go @@ -0,0 +1,442 @@ +package statesync + +import ( + "bytes" + "context" + "errors" + "fmt" + "sync" + "time" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +const ( + // defaultDiscoveryTime is the time to spend discovering snapshots. + defaultDiscoveryTime = 20 * time.Second + // chunkFetchers is the number of concurrent chunk fetchers to run. + chunkFetchers = 4 + // chunkTimeout is the timeout while waiting for the next chunk from the chunk queue. + chunkTimeout = 2 * time.Minute + // chunkRequestTimeout is the timeout before re-requesting a chunk, possibly from a different peer. + chunkRequestTimeout = 10 * time.Second +) + +var ( + // errAbort is returned by Sync() when snapshot restoration is aborted. + errAbort = errors.New("state sync aborted") + // errRetrySnapshot is returned by Sync() when the snapshot should be retried. + errRetrySnapshot = errors.New("retry snapshot") + // errRejectSnapshot is returned by Sync() when the snapshot is rejected. + errRejectSnapshot = errors.New("snapshot was rejected") + // errRejectFormat is returned by Sync() when the snapshot format is rejected. + errRejectFormat = errors.New("snapshot format was rejected") + // errRejectSender is returned by Sync() when the snapshot sender is rejected. + errRejectSender = errors.New("snapshot sender was rejected") + // errVerifyFailed is returned by Sync() when app hash or last height verification fails. + errVerifyFailed = errors.New("verification failed") + // errTimeout is returned by Sync() when we've waited too long to receive a chunk. + errTimeout = errors.New("timed out waiting for chunk") + // errNoSnapshots is returned by SyncAny() if no snapshots are found and discovery is disabled. + errNoSnapshots = errors.New("no suitable snapshots found") +) + +// syncer runs a state sync against an ABCI app. Use either SyncAny() to automatically attempt to + sync all snapshots in the pool (pausing to discover new ones), or Sync() to sync a specific + snapshot. Snapshots and chunks are fed via AddSnapshot() and AddChunk() as appropriate.
+type syncer struct { + logger log.Logger + stateProvider StateProvider + conn proxy.AppConnSnapshot + connQuery proxy.AppConnQuery + snapshots *snapshotPool + tempDir string + + mtx sync.RWMutex + chunks *chunkQueue +} + +// newSyncer creates a new syncer. +func newSyncer(logger log.Logger, conn proxy.AppConnSnapshot, connQuery proxy.AppConnQuery, + stateProvider StateProvider, tempDir string) *syncer { + return &syncer{ + logger: logger, + stateProvider: stateProvider, + conn: conn, + connQuery: connQuery, + snapshots: newSnapshotPool(stateProvider), + tempDir: tempDir, + } +} + +// AddChunk adds a chunk to the chunk queue, if one exists. It returns false if the chunk has already + been added to the queue, or an error if there's no sync in progress. +func (s *syncer) AddChunk(chunk *chunk) (bool, error) { + s.mtx.RLock() + defer s.mtx.RUnlock() + if s.chunks == nil { + return false, errors.New("no state sync in progress") + } + added, err := s.chunks.Add(chunk) + if err != nil { + return false, err + } + if added { + s.logger.Debug("Added chunk to queue", "height", chunk.Height, "format", chunk.Format, + "chunk", chunk.Index) + } else { + s.logger.Debug("Ignoring duplicate chunk in queue", "height", chunk.Height, "format", chunk.Format, + "chunk", chunk.Index) + } + return added, nil +} + +// AddSnapshot adds a snapshot to the snapshot pool. It returns true if a new, previously unseen + snapshot was accepted and added. +func (s *syncer) AddSnapshot(peer p2p.Peer, snapshot *snapshot) (bool, error) { + added, err := s.snapshots.Add(peer, snapshot) + if err != nil { + return false, err + } + if added { + s.logger.Info("Discovered new snapshot", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + } + return added, nil +} + +// AddPeer adds a peer to the pool. For now we just keep it simple and send a single request + to discover snapshots; later we may want to add retries. +func (s *syncer) AddPeer(peer p2p.Peer) { + s.logger.Debug("Requesting snapshots from peer", "peer", peer.ID()) + peer.Send(SnapshotChannel, cdc.MustMarshalBinaryBare(&snapshotsRequestMessage{})) +} + +// RemovePeer removes a peer from the pool. +func (s *syncer) RemovePeer(peer p2p.Peer) { + s.logger.Debug("Removing peer from sync", "peer", peer.ID()) + s.snapshots.RemovePeer(peer.ID()) +} + +// SyncAny tries to sync any of the snapshots in the snapshot pool, waiting to discover further + snapshots if none were found and discoveryTime > 0. It returns the latest state and block commit + which the caller must use to bootstrap the node. +func (s *syncer) SyncAny(discoveryTime time.Duration) (sm.State, *types.Commit, error) { + if discoveryTime > 0 { + s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) + time.Sleep(discoveryTime) + } + + // The app may ask us to retry a snapshot restoration, in which case we need to reuse + the snapshot and chunk queue from the previous loop iteration. + var ( + snapshot *snapshot + chunks *chunkQueue + err error + ) + for { + // If snapshot is not nil, we're retrying restoration of the same snapshot.
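+		// Both snapshot and chunks are cleared at the bottom of the loop; a retry skips
+		// that via continue, keeping them for the next iteration.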
+ if snapshot == nil { + snapshot = s.snapshots.Best() + chunks = nil + } + if snapshot == nil { + if discoveryTime == 0 { + return sm.State{}, nil, errNoSnapshots + } + s.logger.Info(fmt.Sprintf("Discovering snapshots for %v", discoveryTime)) + time.Sleep(discoveryTime) + continue + } + if chunks == nil { + chunks, err = newChunkQueue(snapshot, s.tempDir) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to create chunk queue: %w", err) + } + defer chunks.Close() // in case we forget to close it elsewhere + } + + newState, commit, err := s.Sync(snapshot, chunks) + switch { + case err == nil: + return newState, commit, nil + + case errors.Is(err, errAbort): + return sm.State{}, nil, err + + case errors.Is(err, errRetrySnapshot): + chunks.RetryAll() + s.logger.Info("Retrying snapshot", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + continue + + case errors.Is(err, errTimeout): + s.snapshots.Reject(snapshot) + s.logger.Error("Timed out waiting for snapshot chunks, rejected snapshot", + "height", snapshot.Height, "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + + case errors.Is(err, errRejectSnapshot): + s.snapshots.Reject(snapshot) + s.logger.Info("Snapshot rejected", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + + case errors.Is(err, errRejectFormat): + s.snapshots.RejectFormat(snapshot.Format) + s.logger.Info("Snapshot format rejected", "format", snapshot.Format) + + case errors.Is(err, errRejectSender): + s.logger.Info("Snapshot senders rejected", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + for _, peer := range s.snapshots.GetPeers(snapshot) { + s.snapshots.RejectPeer(peer.ID()) + s.logger.Info("Snapshot sender rejected", "peer", peer.ID()) + } + + default: + return sm.State{}, nil, fmt.Errorf("snapshot restoration failed: %w", err) + } + + // Discard snapshot and chunks for next iteration + err = chunks.Close() + if err != nil { + s.logger.Error("Failed to clean up chunk queue", "err", err) + } + snapshot = nil + chunks = nil + } +} + +// Sync executes a sync for a specific snapshot, returning the latest state and block commit which +// the caller must use to bootstrap the node. +func (s *syncer) Sync(snapshot *snapshot, chunks *chunkQueue) (sm.State, *types.Commit, error) { + s.mtx.Lock() + if s.chunks != nil { + s.mtx.Unlock() + return sm.State{}, nil, errors.New("a state sync is already in progress") + } + s.chunks = chunks + s.mtx.Unlock() + defer func() { + s.mtx.Lock() + s.chunks = nil + s.mtx.Unlock() + }() + + // Offer snapshot to ABCI app. + err := s.offerSnapshot(snapshot) + if err != nil { + return sm.State{}, nil, err + } + + // Spawn chunk fetchers. They will terminate when the chunk queue is closed or context cancelled. + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + for i := int32(0); i < chunkFetchers; i++ { + go s.fetchChunks(ctx, snapshot, chunks) + } + + // Optimistically build new state, so we don't discover any light client failures at the end. 
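+	// Building the state verifies headers at height, height+1, and height+2 via the
+	// light client (see stateprovider.go), so any verification failure surfaces here
+	// rather than after all chunks have been applied.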
+ state, err := s.stateProvider.State(snapshot.Height) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to build new state: %w", err) + } + commit, err := s.stateProvider.Commit(snapshot.Height) + if err != nil { + return sm.State{}, nil, fmt.Errorf("failed to fetch commit: %w", err) + } + + // Restore snapshot + err = s.applyChunks(chunks) + if err != nil { + return sm.State{}, nil, err + } + + // Verify app and update app version + appVersion, err := s.verifyApp(snapshot) + if err != nil { + return sm.State{}, nil, err + } + state.Version.Consensus.App = version.Protocol(appVersion) + + // Done! 🎉 + s.logger.Info("Snapshot restored", "height", snapshot.Height, "format", snapshot.Format, + "hash", fmt.Sprintf("%X", snapshot.Hash)) + + return state, commit, nil +} + +// offerSnapshot offers a snapshot to the app. It returns various errors depending on the app's +// response, or nil if the snapshot was accepted. +func (s *syncer) offerSnapshot(snapshot *snapshot) error { + s.logger.Info("Offering snapshot to ABCI app", "height", snapshot.Height, + "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + resp, err := s.conn.OfferSnapshotSync(abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: snapshot.Height, + Format: snapshot.Format, + Chunks: snapshot.Chunks, + Hash: snapshot.Hash, + Metadata: snapshot.Metadata, + }, + AppHash: snapshot.trustedAppHash, + }) + if err != nil { + return fmt.Errorf("failed to offer snapshot: %w", err) + } + switch resp.Result { + case abci.ResponseOfferSnapshot_accept: + s.logger.Info("Snapshot accepted, restoring", "height", snapshot.Height, + "format", snapshot.Format, "hash", fmt.Sprintf("%X", snapshot.Hash)) + return nil + case abci.ResponseOfferSnapshot_abort: + return errAbort + case abci.ResponseOfferSnapshot_reject: + return errRejectSnapshot + case abci.ResponseOfferSnapshot_reject_format: + return errRejectFormat + case abci.ResponseOfferSnapshot_reject_sender: + return errRejectSender + default: + return fmt.Errorf("invalid ResponseOfferSnapshot result %v", resp.Result) + } +} + +// applyChunks applies chunks to the app. It returns various errors depending on the app's +// response, or nil once the snapshot is fully restored. 
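+// For reference, a minimal application-side handler for these requests might look as
+// follows (the restore target is an assumed application type, not part of this changeset):
+//
+//	func (app *App) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
+//		if err := app.restorer.WriteChunk(req.Index, req.Chunk); err != nil {
+//			return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_retry}
+//		}
+//		return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}
+//	}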
+func (s *syncer) applyChunks(chunks *chunkQueue) error { + for { + chunk, err := chunks.Next() + if err == errDone { + return nil + } else if err != nil { + return fmt.Errorf("failed to fetch chunk: %w", err) + } + + resp, err := s.conn.ApplySnapshotChunkSync(abci.RequestApplySnapshotChunk{ + Index: chunk.Index, + Chunk: chunk.Chunk, + Sender: string(chunk.Sender), + }) + if err != nil { + return fmt.Errorf("failed to apply chunk %v: %w", chunk.Index, err) + } + s.logger.Info("Applied snapshot chunk to ABCI app", "height", chunk.Height, + "format", chunk.Format, "chunk", chunk.Index, "total", chunks.Size()) + + // Discard and refetch any chunks as requested by the app + for _, index := range resp.RefetchChunks { + err := chunks.Discard(index) + if err != nil { + return fmt.Errorf("failed to discard chunk %v: %w", index, err) + } + } + + // Reject any senders as requested by the app + for _, sender := range resp.RejectSenders { + if sender != "" { + s.snapshots.RejectPeer(p2p.ID(sender)) + err := chunks.DiscardSender(p2p.ID(sender)) + if err != nil { + return fmt.Errorf("failed to reject sender: %w", err) + } + } + } + + switch resp.Result { + case abci.ResponseApplySnapshotChunk_accept: + case abci.ResponseApplySnapshotChunk_abort: + return errAbort + case abci.ResponseApplySnapshotChunk_retry: + chunks.Retry(chunk.Index) + case abci.ResponseApplySnapshotChunk_retry_snapshot: + return errRetrySnapshot + case abci.ResponseApplySnapshotChunk_reject_snapshot: + return errRejectSnapshot + default: + return fmt.Errorf("unknown ResponseApplySnapshotChunk result %v", resp.Result) + } + } +} + +// fetchChunks requests chunks from peers, receiving allocations from the chunk queue. Chunks + are received from the reactor via syncer.AddChunk() and forwarded to chunkQueue.Add(). +func (s *syncer) fetchChunks(ctx context.Context, snapshot *snapshot, chunks *chunkQueue) { + for { + index, err := chunks.Allocate() + if err == errDone { + // Keep checking until the context is cancelled (restore is done), in case any + chunks need to be refetched. + select { + case <-ctx.Done(): + return + default: + } + time.Sleep(2 * time.Second) + continue + } + if err != nil { + s.logger.Error("Failed to allocate chunk from queue", "err", err) + return + } + s.logger.Info("Fetching snapshot chunk", "height", snapshot.Height, + "format", snapshot.Format, "chunk", index, "total", chunks.Size()) + + // Stop the ticker explicitly instead of deferring: a defer inside this loop would + only run at function return, accumulating one live ticker per chunk. + ticker := time.NewTicker(chunkRequestTimeout) + s.requestChunk(snapshot, index) + select { + case <-chunks.WaitFor(index): + case <-ticker.C: + s.requestChunk(snapshot, index) + case <-ctx.Done(): + ticker.Stop() + return + } + ticker.Stop() + } +} + +// requestChunk requests a chunk from a peer. +func (s *syncer) requestChunk(snapshot *snapshot, chunk uint32) { + peer := s.snapshots.GetPeer(snapshot) + if peer == nil { + s.logger.Error("No valid peers found for snapshot", "height", snapshot.Height, + "format", snapshot.Format, "hash", snapshot.Hash) + return + } + s.logger.Debug("Requesting snapshot chunk", "height", snapshot.Height, + "format", snapshot.Format, "chunk", chunk, "peer", peer.ID()) + peer.Send(ChunkChannel, cdc.MustMarshalBinaryBare(&chunkRequestMessage{ + Height: snapshot.Height, + Format: snapshot.Format, + Index: chunk, + })) +} + +// verifyApp verifies the sync, checking the app hash and last block height. It returns the + app version, which should be returned as part of the initial state.
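+// Info's LastBlockAppHash is the app hash resulting from committing LastBlockHeight,
+// i.e. the same quantity the light client reported for the snapshot height, so the two
+// must match after a correct restore.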
+func (s *syncer) verifyApp(snapshot *snapshot) (uint64, error) { + resp, err := s.connQuery.InfoSync(proxy.RequestInfo) + if err != nil { + return 0, fmt.Errorf("failed to query ABCI app for appHash: %w", err) + } + if !bytes.Equal(snapshot.trustedAppHash, resp.LastBlockAppHash) { + s.logger.Error("appHash verification failed", + "expected", fmt.Sprintf("%X", snapshot.trustedAppHash), + "actual", fmt.Sprintf("%X", resp.LastBlockAppHash)) + return 0, errVerifyFailed + } + if uint64(resp.LastBlockHeight) != snapshot.Height { + s.logger.Error("ABCI app reported unexpected last block height", + "expected", snapshot.Height, "actual", resp.LastBlockHeight) + return 0, errVerifyFailed + } + s.logger.Info("Verified ABCI app", "height", snapshot.Height, + "appHash", fmt.Sprintf("%X", snapshot.trustedAppHash)) + return resp.AppVersion, nil +} diff --git a/statesync/syncer_test.go b/statesync/syncer_test.go new file mode 100644 index 000000000..c56c966a2 --- /dev/null +++ b/statesync/syncer_test.go @@ -0,0 +1,639 @@ +package statesync + +import ( + "errors" + "sync" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + abci "github.com/tendermint/tendermint/abci/types" + "github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/p2p" + p2pmocks "github.com/tendermint/tendermint/p2p/mocks" + "github.com/tendermint/tendermint/proxy" + proxymocks "github.com/tendermint/tendermint/proxy/mocks" + sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/statesync/mocks" + "github.com/tendermint/tendermint/types" + "github.com/tendermint/tendermint/version" +) + +// Sets up a basic syncer that can be used to test OfferSnapshot requests +func setupOfferSyncer(t *testing.T) (*syncer, *proxymocks.AppConnSnapshot) { + connQuery := &proxymocks.AppConnQuery{} + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + return syncer, connSnapshot +} + +// Sets up a simple peer mock with an ID +func simplePeer(id string) *p2pmocks.Peer { + peer := &p2pmocks.Peer{} + peer.On("ID").Return(p2p.ID(id)) + return peer +} + +func TestSyncer_SyncAny(t *testing.T) { + state := sm.State{ + ChainID: "chain", + Version: sm.Version{ + Consensus: version.Consensus{ + Block: version.BlockProtocol, + App: 0, + }, + + Software: version.TMCoreSemVer, + }, + + LastBlockHeight: 1, + LastBlockID: types.BlockID{Hash: []byte("blockhash")}, + LastBlockTime: time.Now(), + LastResultsHash: []byte("last_results_hash"), + AppHash: []byte("app_hash"), + + LastValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val1")}}, + Validators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val2")}}, + NextValidators: &types.ValidatorSet{Proposer: &types.Validator{Address: []byte("val3")}}, + + ConsensusParams: *types.DefaultConsensusParams(), + LastHeightConsensusParamsChanged: 1, + } + commit := &types.Commit{BlockID: types.BlockID{Hash: []byte("blockhash")}} + + chunks := []*chunk{ + {Height: 1, Format: 1, Index: 0, Chunk: []byte{1, 1, 0}}, + {Height: 1, Format: 1, Index: 1, Chunk: []byte{1, 1, 1}}, + {Height: 1, Format: 1, Index: 2, Chunk: []byte{1, 1, 2}}, + } + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + + stateProvider := &mocks.StateProvider{} + 
stateProvider.On("AppHash", uint64(1)).Return(state.AppHash, nil) + stateProvider.On("AppHash", uint64(2)).Return([]byte("app_hash_2"), nil) + stateProvider.On("Commit", uint64(1)).Return(commit, nil) + stateProvider.On("State", uint64(1)).Return(state, nil) + connSnapshot := &proxymocks.AppConnSnapshot{} + connQuery := &proxymocks.AppConnQuery{} + + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + // Adding a chunk should error when no sync is in progress + _, err := syncer.AddChunk(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{1}}) + require.Error(t, err) + + // Adding a couple of peers should trigger snapshot discovery messages + peerA := &p2pmocks.Peer{} + peerA.On("ID").Return(p2p.ID("a")) + peerA.On("Send", SnapshotChannel, cdc.MustMarshalBinaryBare(&snapshotsRequestMessage{})).Return(true) + syncer.AddPeer(peerA) + peerA.AssertExpectations(t) + + peerB := &p2pmocks.Peer{} + peerB.On("ID").Return(p2p.ID("b")) + peerB.On("Send", SnapshotChannel, cdc.MustMarshalBinaryBare(&snapshotsRequestMessage{})).Return(true) + syncer.AddPeer(peerB) + peerB.AssertExpectations(t) + + // Both peers report back with snapshots. One of them also returns a snapshot we don't want, in + format 2, which will be rejected by the ABCI application. + new, err := syncer.AddSnapshot(peerA, s) + require.NoError(t, err) + assert.True(t, new) + + new, err = syncer.AddSnapshot(peerB, s) + require.NoError(t, err) + assert.False(t, new) + + new, err = syncer.AddSnapshot(peerB, &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1}}) + require.NoError(t, err) + assert.True(t, new) + + // We start a sync, with peers sending back chunks when requested. We first reject the snapshot + with height 2 format 2, and accept the snapshot at height 1. + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: 2, + Format: 2, + Chunks: 3, + Hash: []byte{1}, + }, + AppHash: []byte("app_hash_2"), + }).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject_format}, nil) + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: &abci.Snapshot{ + Height: s.Height, + Format: s.Format, + Chunks: s.Chunks, + Hash: s.Hash, + Metadata: s.Metadata, + }, + AppHash: []byte("app_hash"), + }).Times(2).Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_accept}, nil) + + chunkRequests := make(map[uint32]int) + chunkRequestsMtx := sync.Mutex{} + onChunkRequest := func(args mock.Arguments) { + msg := &chunkRequestMessage{} + err := cdc.UnmarshalBinaryBare(args[1].([]byte), &msg) + require.NoError(t, err) + require.EqualValues(t, 1, msg.Height) + require.EqualValues(t, 1, msg.Format) + require.Less(t, msg.Index, uint32(len(chunks))) + + added, err := syncer.AddChunk(chunks[msg.Index]) + require.NoError(t, err) + assert.True(t, added) + + chunkRequestsMtx.Lock() + chunkRequests[msg.Index]++ + chunkRequestsMtx.Unlock() + } + peerA.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + peerB.On("Send", ChunkChannel, mock.Anything).Maybe().Run(onChunkRequest).Return(true) + + // The first time we're applying chunk 2 we tell it to retry the snapshot and discard chunk 1, + which should cause it to keep the existing chunks 0 and 2, and restart restoration from the + beginning. We also wait for a little while, to exercise the retry logic in fetchChunks().
+ connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{1, 1, 2}, + }).Once().Run(func(args mock.Arguments) { time.Sleep(2 * time.Second) }).Return( + &abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_retry_snapshot, + RefetchChunks: []uint32{1}, + }, nil) + + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: []byte{1, 1, 0}, + }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 1, Chunk: []byte{1, 1, 1}, + }).Times(2).Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{1, 1, 2}, + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + connQuery.On("InfoSync", proxy.RequestInfo).Return(&abci.ResponseInfo{ + AppVersion: 9, + LastBlockHeight: 1, + LastBlockAppHash: []byte("app_hash"), + }, nil) + + newState, lastCommit, err := syncer.SyncAny(0) + require.NoError(t, err) + + chunkRequestsMtx.Lock() + assert.Equal(t, map[uint32]int{0: 1, 1: 2, 2: 1}, chunkRequests) + chunkRequestsMtx.Unlock() + + // The syncer should have updated the state app version from the ABCI info response. + expectState := state + expectState.Version.Consensus.App = 9 + + assert.Equal(t, expectState, newState) + assert.Equal(t, commit, lastCommit) + + connSnapshot.AssertExpectations(t) + connQuery.AssertExpectations(t) + peerA.AssertExpectations(t) + peerB.AssertExpectations(t) +} + +func TestSyncer_SyncAny_noSnapshots(t *testing.T) { + syncer, _ := setupOfferSyncer(t) + _, _, err := syncer.SyncAny(0) + assert.Equal(t, errNoSnapshots, err) +} + +func TestSyncer_SyncAny_abort(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + syncer.AddSnapshot(simplePeer("id"), s) + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_abort}, nil) + + _, _, err := syncer.SyncAny(0) + assert.Equal(t, errAbort, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_reject(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + // s22 is tried first, then s12, then s11, then errNoSnapshots + s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + syncer.AddSnapshot(simplePeer("id"), s22) + syncer.AddSnapshot(simplePeer("id"), s12) + syncer.AddSnapshot(simplePeer("id"), s11) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s22), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s12), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s11), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: 
abci.ResponseOfferSnapshot_reject}, nil) + + _, _, err := syncer.SyncAny(0) + assert.Equal(t, errNoSnapshots, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_reject_format(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + // s22 is tried first; rejecting its format also rejects s12, then s11 will abort. + s22 := &snapshot{Height: 2, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s12 := &snapshot{Height: 1, Format: 2, Chunks: 3, Hash: []byte{1, 2, 3}} + s11 := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + syncer.AddSnapshot(simplePeer("id"), s22) + syncer.AddSnapshot(simplePeer("id"), s12) + syncer.AddSnapshot(simplePeer("id"), s11) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s22), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject_format}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s11), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_abort}, nil) + + _, _, err := syncer.SyncAny(0) + assert.Equal(t, errAbort, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_reject_sender(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + peerA := simplePeer("a") + peerB := simplePeer("b") + peerC := simplePeer("c") + + // sbc will be offered first, which will be rejected with reject_sender, causing all snapshots + submitted by both b and c (i.e. sb, sc, sbc) to be rejected. Finally, sa is rejected and + errNoSnapshots is returned. + sa := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sb := &snapshot{Height: 2, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sc := &snapshot{Height: 3, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + sbc := &snapshot{Height: 4, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + _, err := syncer.AddSnapshot(peerA, sa) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, sb) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, sc) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerB, sbc) + require.NoError(t, err) + _, err = syncer.AddSnapshot(peerC, sbc) + require.NoError(t, err) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(sbc), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject_sender}, nil) + + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(sa), AppHash: []byte("app_hash"), + }).Once().Return(&abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject}, nil) + + _, _, err = syncer.SyncAny(0) + assert.Equal(t, errNoSnapshots, err) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_SyncAny_abciError(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + + errBoom := errors.New("boom") + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}} + syncer.AddSnapshot(simplePeer("id"), s) + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), AppHash: []byte("app_hash"), + }).Once().Return(nil, errBoom) + + _, _, err := syncer.SyncAny(0) + assert.True(t, errors.Is(err, errBoom)) + connSnapshot.AssertExpectations(t) +} + +func TestSyncer_offerSnapshot(t *testing.T) { + unknownErr := errors.New("unknown error") + boom := errors.New("boom") + + testcases := map[string]struct { + result
abci.ResponseOfferSnapshot_Result + err error + expectErr error + }{ + "accept": {abci.ResponseOfferSnapshot_accept, nil, nil}, + "abort": {abci.ResponseOfferSnapshot_abort, nil, errAbort}, + "reject": {abci.ResponseOfferSnapshot_reject, nil, errRejectSnapshot}, + "reject_format": {abci.ResponseOfferSnapshot_reject_format, nil, errRejectFormat}, + "reject_sender": {abci.ResponseOfferSnapshot_reject_sender, nil, errRejectSender}, + "error": {0, boom, boom}, + "unknown result": {9, nil, unknownErr}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + syncer, connSnapshot := setupOfferSyncer(t) + s := &snapshot{Height: 1, Format: 1, Chunks: 3, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")} + connSnapshot.On("OfferSnapshotSync", abci.RequestOfferSnapshot{ + Snapshot: toABCI(s), + AppHash: []byte("app_hash"), + }).Return(&abci.ResponseOfferSnapshot{Result: tc.result}, tc.err) + err := syncer.offerSnapshot(s) + if tc.expectErr == unknownErr { + require.Error(t, err) + } else { + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped + } + assert.Equal(t, tc.expectErr, err) + } + }) + } +} + +func TestSyncer_applyChunks_Results(t *testing.T) { + unknownErr := errors.New("unknown error") + boom := errors.New("boom") + + testcases := map[string]struct { + result abci.ResponseApplySnapshotChunk_Result + err error + expectErr error + }{ + "accept": {abci.ResponseApplySnapshotChunk_accept, nil, nil}, + "abort": {abci.ResponseApplySnapshotChunk_abort, nil, errAbort}, + "retry": {abci.ResponseApplySnapshotChunk_retry, nil, nil}, + "retry_snapshot": {abci.ResponseApplySnapshotChunk_retry_snapshot, nil, errRetrySnapshot}, + "reject_snapshot": {abci.ResponseApplySnapshotChunk_reject_snapshot, nil, errRejectSnapshot}, + "error": {0, boom, boom}, + "unknown result": {9, nil, unknownErr}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + connQuery := &proxymocks.AppConnQuery{} + connSnapshot := &proxymocks.AppConnSnapshot{} + stateProvider := &mocks.StateProvider{} + stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil) + syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "") + + body := []byte{1, 2, 3} + chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 1}, "") + chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: body}) + require.NoError(t, err) + + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: body, + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: tc.result}, tc.err) + if tc.result == abci.ResponseApplySnapshotChunk_retry { + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: body, + }).Once().Return(&abci.ResponseApplySnapshotChunk{ + Result: abci.ResponseApplySnapshotChunk_accept}, nil) + } + + err = syncer.applyChunks(chunks) + if tc.expectErr == unknownErr { + require.Error(t, err) + } else { + unwrapped := errors.Unwrap(err) + if unwrapped != nil { + err = unwrapped + } + assert.Equal(t, tc.expectErr, err) + } + connSnapshot.AssertExpectations(t) + }) + } +} + +func TestSyncer_applyChunks_RefetchChunks(t *testing.T) { + // Discarding chunks via refetch_chunks should work the same for all results + testcases := map[string]struct { + result abci.ResponseApplySnapshotChunk_Result + }{ + "accept": {abci.ResponseApplySnapshotChunk_accept}, + "abort": {abci.ResponseApplySnapshotChunk_abort}, + "retry": 
+		"retry":           {abci.ResponseApplySnapshotChunk_retry},
+		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_retry_snapshot},
+		"reject_snapshot": {abci.ResponseApplySnapshotChunk_reject_snapshot},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			connQuery := &proxymocks.AppConnQuery{}
+			connSnapshot := &proxymocks.AppConnSnapshot{}
+			stateProvider := &mocks.StateProvider{}
+			stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
+			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
+
+			chunks, err := newChunkQueue(&snapshot{Height: 1, Format: 1, Chunks: 3}, "")
+			require.NoError(t, err)
+			added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}})
+			require.True(t, added)
+			require.NoError(t, err)
+			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}})
+			require.True(t, added)
+			require.NoError(t, err)
+			added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}})
+			require.True(t, added)
+			require.NoError(t, err)
+
+			// The first two chunks are accepted, before the last one asks for chunk 1 to be refetched
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 0, Chunk: []byte{0},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil)
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 1, Chunk: []byte{1},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil)
+			connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{
+				Index: 2, Chunk: []byte{2},
+			}).Once().Return(&abci.ResponseApplySnapshotChunk{
+				Result:        tc.result,
+				RefetchChunks: []uint32{1},
+			}, nil)
+
+			// Since removing the chunk will cause Next() to block, we spawn a goroutine, then
+			// check the queue contents, and finally close the queue to end the goroutine.
+			// We don't really care about the result of applyChunks, since it has a separate test.
+			go func() {
+				syncer.applyChunks(chunks)
+			}()
+
+			time.Sleep(50 * time.Millisecond)
+			assert.True(t, chunks.Has(0))
+			assert.False(t, chunks.Has(1))
+			assert.True(t, chunks.Has(2))
+			err = chunks.Close()
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestSyncer_applyChunks_RejectSenders(t *testing.T) {
+	// Rejecting chunk senders via reject_senders should work the same for all results
+	testcases := map[string]struct {
+		result abci.ResponseApplySnapshotChunk_Result
+	}{
+		"accept":          {abci.ResponseApplySnapshotChunk_accept},
+		"abort":           {abci.ResponseApplySnapshotChunk_abort},
+		"retry":           {abci.ResponseApplySnapshotChunk_retry},
+		"retry_snapshot":  {abci.ResponseApplySnapshotChunk_retry_snapshot},
+		"reject_snapshot": {abci.ResponseApplySnapshotChunk_reject_snapshot},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			connQuery := &proxymocks.AppConnQuery{}
+			connSnapshot := &proxymocks.AppConnSnapshot{}
+			stateProvider := &mocks.StateProvider{}
+			stateProvider.On("AppHash", mock.Anything).Return([]byte("app_hash"), nil)
+			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
+
+			// Set up three peers across two snapshots, and ask for one of them to be banned.
+			// It should be banned from all snapshots.
+ peerA := simplePeer("a") + peerB := simplePeer("b") + peerC := simplePeer("c") + + s1 := &snapshot{Height: 1, Format: 1, Chunks: 3} + s2 := &snapshot{Height: 2, Format: 1, Chunks: 3} + syncer.AddSnapshot(peerA, s1) + syncer.AddSnapshot(peerA, s2) + syncer.AddSnapshot(peerB, s1) + syncer.AddSnapshot(peerB, s2) + syncer.AddSnapshot(peerC, s1) + syncer.AddSnapshot(peerC, s2) + + chunks, err := newChunkQueue(s1, "") + require.NoError(t, err) + added, err := chunks.Add(&chunk{Height: 1, Format: 1, Index: 0, Chunk: []byte{0}, Sender: peerA.ID()}) + require.True(t, added) + require.NoError(t, err) + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 1, Chunk: []byte{1}, Sender: peerB.ID()}) + require.True(t, added) + require.NoError(t, err) + added, err = chunks.Add(&chunk{Height: 1, Format: 1, Index: 2, Chunk: []byte{2}, Sender: peerC.ID()}) + require.True(t, added) + require.NoError(t, err) + + // The first two chunks are accepted, before the last one asks for b sender to be rejected + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 0, Chunk: []byte{0}, Sender: "a", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 1, Chunk: []byte{1}, Sender: "b", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "c", + }).Once().Return(&abci.ResponseApplySnapshotChunk{ + Result: tc.result, + RejectSenders: []string{string(peerB.ID())}, + }, nil) + + // On retry, the last chunk will be tried again, so we just accept it then. + if tc.result == abci.ResponseApplySnapshotChunk_retry { + connSnapshot.On("ApplySnapshotChunkSync", abci.RequestApplySnapshotChunk{ + Index: 2, Chunk: []byte{2}, Sender: "c", + }).Once().Return(&abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}, nil) + } + + // We don't really care about the result of applyChunks, since it has separate test. + // However, it will block on e.g. retry result, so we spawn a goroutine that will + // be shut down when the chunk queue closes. 
+			go func() {
+				syncer.applyChunks(chunks)
+			}()
+
+			time.Sleep(50 * time.Millisecond)
+
+			s1peers := syncer.snapshots.GetPeers(s1)
+			assert.Len(t, s1peers, 2)
+			assert.EqualValues(t, "a", s1peers[0].ID())
+			assert.EqualValues(t, "c", s1peers[1].ID())
+
+			s2peers := syncer.snapshots.GetPeers(s2)
+			assert.Len(t, s2peers, 2)
+			assert.EqualValues(t, "a", s2peers[0].ID())
+			assert.EqualValues(t, "c", s2peers[1].ID())
+
+			err = chunks.Close()
+			require.NoError(t, err)
+		})
+	}
+}
+
+func TestSyncer_verifyApp(t *testing.T) {
+	boom := errors.New("boom")
+	s := &snapshot{Height: 3, Format: 1, Chunks: 5, Hash: []byte{1, 2, 3}, trustedAppHash: []byte("app_hash")}
+
+	testcases := map[string]struct {
+		response  *abci.ResponseInfo
+		err       error
+		expectErr error
+	}{
+		"verified": {&abci.ResponseInfo{
+			LastBlockHeight:  3,
+			LastBlockAppHash: []byte("app_hash"),
+			AppVersion:       9,
+		}, nil, nil},
+		"invalid height": {&abci.ResponseInfo{
+			LastBlockHeight:  5,
+			LastBlockAppHash: []byte("app_hash"),
+			AppVersion:       9,
+		}, nil, errVerifyFailed},
+		"invalid hash": {&abci.ResponseInfo{
+			LastBlockHeight:  3,
+			LastBlockAppHash: []byte("xxx"),
+			AppVersion:       9,
+		}, nil, errVerifyFailed},
+		"error": {nil, boom, boom},
+	}
+	for name, tc := range testcases {
+		tc := tc
+		t.Run(name, func(t *testing.T) {
+			connQuery := &proxymocks.AppConnQuery{}
+			connSnapshot := &proxymocks.AppConnSnapshot{}
+			stateProvider := &mocks.StateProvider{}
+			syncer := newSyncer(log.NewNopLogger(), connSnapshot, connQuery, stateProvider, "")
+
+			connQuery.On("InfoSync", proxy.RequestInfo).Return(tc.response, tc.err)
+			version, err := syncer.verifyApp(s)
+			unwrapped := errors.Unwrap(err)
+			if unwrapped != nil {
+				err = unwrapped
+			}
+			assert.Equal(t, tc.expectErr, err)
+			if err == nil {
+				assert.Equal(t, tc.response.AppVersion, version)
+			}
+		})
+	}
+}
+
+func toABCI(s *snapshot) *abci.Snapshot {
+	return &abci.Snapshot{
+		Height:   s.Height,
+		Format:   s.Format,
+		Chunks:   s.Chunks,
+		Hash:     s.Hash,
+		Metadata: s.Metadata,
+	}
+}
diff --git a/store/store.go b/store/store.go
index c971a9a15..2ef1f3c6e 100644
--- a/store/store.go
+++ b/store/store.go
@@ -333,6 +333,12 @@ func (bs *BlockStore) saveState() {
 	bsJSON.Save(bs.db)
 }
 
+// SaveSeenCommit saves a seen commit, used e.g. by the state sync reactor when bootstrapping a node.
+func (bs *BlockStore) SaveSeenCommit(height int64, seenCommit *types.Commit) error {
+	seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit)
+	return bs.db.Set(calcSeenCommitKey(height), seenCommitBytes)
+}
+
 //-----------------------------------------------------------------------------
 
 func calcBlockMetaKey(height int64) []byte {
diff --git a/version/version.go b/version/version.go
index b0521c2c1..f4968484b 100644
--- a/version/version.go
+++ b/version/version.go
@@ -23,7 +23,7 @@ const (
 	TMCoreSemVer = "0.33.4"
 
 	// ABCISemVer is the semantic version of the ABCI library
-	ABCISemVer = "0.16.2"
+	ABCISemVer = "0.17.0"
 
 	ABCIVersion = ABCISemVer
 )
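For orientation, here is a minimal sketch (not part of this changeset) of the application side of these four ABCI methods, i.e. what the mocked `OfferSnapshotSync`/`ApplySnapshotChunkSync` calls above would hit in a real node. Only the abci request/response types and result codes come from this PR; the in-memory chunk store, the single `snapshotFormat`, and the refetch-on-empty-chunk policy are illustrative assumptions, not Tendermint's or any SDK's actual implementation.

```go
// Hypothetical sketch of an ABCI application serving state sync snapshots.
package app

import (
	abci "github.com/tendermint/tendermint/abci/types"
)

const snapshotFormat = 1 // assumed: the only format this app can restore

type SnapshotApp struct {
	abci.BaseApplication // defaults for the remaining ABCI methods

	snapshots map[uint64][][]byte // height -> chunk bodies (toy in-memory store)
	restoring [][]byte            // chunks accepted so far during a restore
}

// ListSnapshots advertises locally available snapshots to peers.
func (app *SnapshotApp) ListSnapshots(req abci.RequestListSnapshots) abci.ResponseListSnapshots {
	resp := abci.ResponseListSnapshots{}
	for height, chunks := range app.snapshots {
		resp.Snapshots = append(resp.Snapshots, &abci.Snapshot{
			Height: height,
			Format: snapshotFormat,
			Chunks: uint32(len(chunks)),
		})
	}
	return resp
}

// LoadSnapshotChunk serves a single chunk to a syncing peer.
func (app *SnapshotApp) LoadSnapshotChunk(req abci.RequestLoadSnapshotChunk) abci.ResponseLoadSnapshotChunk {
	chunks, ok := app.snapshots[req.Height]
	if !ok || req.Format != snapshotFormat || req.Chunk >= uint32(len(chunks)) {
		return abci.ResponseLoadSnapshotChunk{} // unknown chunk: return empty
	}
	return abci.ResponseLoadSnapshotChunk{Chunk: chunks[req.Chunk]}
}

// OfferSnapshot decides whether to restore from an offered snapshot. The
// result codes map onto the syncer errors exercised in the tests above.
func (app *SnapshotApp) OfferSnapshot(req abci.RequestOfferSnapshot) abci.ResponseOfferSnapshot {
	switch {
	case req.Snapshot == nil:
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject}
	case req.Snapshot.Format != snapshotFormat:
		// errRejectFormat: the syncer will skip all snapshots with this format.
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_reject_format}
	default:
		app.restoring = nil
		return abci.ResponseOfferSnapshot{Result: abci.ResponseOfferSnapshot_accept}
	}
}

// ApplySnapshotChunk restores one chunk. On a bad chunk it asks the syncer to
// refetch it and to drop the peer that sent it (RefetchChunks/RejectSenders).
func (app *SnapshotApp) ApplySnapshotChunk(req abci.RequestApplySnapshotChunk) abci.ResponseApplySnapshotChunk {
	if len(req.Chunk) == 0 {
		return abci.ResponseApplySnapshotChunk{
			Result:        abci.ResponseApplySnapshotChunk_retry,
			RefetchChunks: []uint32{req.Index},
			RejectSenders: []string{req.Sender},
		}
	}
	app.restoring = append(app.restoring, req.Chunk)
	return abci.ResponseApplySnapshotChunk{Result: abci.ResponseApplySnapshotChunk_accept}
}
```

On a `retry` result the syncer re-applies the same chunk once it is available again, which is why the tests above register a second, accepting expectation for the retried index.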