diff --git a/changelogs/unreleased/1304-skriss b/changelogs/unreleased/1304-skriss new file mode 100644 index 000000000..df64793ba --- /dev/null +++ b/changelogs/unreleased/1304-skriss @@ -0,0 +1 @@ +enable restore item actions to return additional related items to be restored; have pods return PVCs and PVCs return PVs diff --git a/pkg/cmd/server/plugin/plugin.go b/pkg/cmd/server/plugin/plugin.go index 2d8474dcf..cdec1f383 100644 --- a/pkg/cmd/server/plugin/plugin.go +++ b/pkg/cmd/server/plugin/plugin.go @@ -52,6 +52,8 @@ func NewCommand(f client.Factory) *cobra.Command { RegisterRestoreItemAction("restic", newResticRestoreItemAction). RegisterRestoreItemAction("service", newServiceRestoreItemAction). RegisterRestoreItemAction("serviceaccount", newServiceAccountRestoreItemAction). + RegisterRestoreItemAction("addPVCFromPod", newAddPVCFromPodRestoreItemAction). + RegisterRestoreItemAction("addPVFromPVC", newAddPVFromPVCRestoreItemAction). Serve() }, } @@ -137,3 +139,11 @@ func newServiceRestoreItemAction(logger logrus.FieldLogger) (interface{}, error) func newServiceAccountRestoreItemAction(logger logrus.FieldLogger) (interface{}, error) { return restore.NewServiceAccountAction(logger), nil } + +func newAddPVCFromPodRestoreItemAction(logger logrus.FieldLogger) (interface{}, error) { + return restore.NewAddPVCFromPodAction(logger), nil +} + +func newAddPVFromPVCRestoreItemAction(logger logrus.FieldLogger) (interface{}, error) { + return restore.NewAddPVFromPVCAction(logger), nil +} diff --git a/pkg/plugin/framework/restore_item_action_client.go b/pkg/plugin/framework/restore_item_action_client.go index 69ce404c4..d4b40e2ac 100644 --- a/pkg/plugin/framework/restore_item_action_client.go +++ b/pkg/plugin/framework/restore_item_action_client.go @@ -23,6 +23,7 @@ import ( "golang.org/x/net/context" "google.golang.org/grpc" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime/schema" proto "github.com/heptio/velero/pkg/plugin/generated" "github.com/heptio/velero/pkg/plugin/velero" @@ -82,7 +83,7 @@ func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExe return nil, errors.WithStack(err) } - req := &proto.RestoreExecuteRequest{ + req := &proto.RestoreItemActionExecuteRequest{ Plugin: c.plugin, Item: itemJSON, ItemFromBackup: itemFromBackupJSON, @@ -104,8 +105,23 @@ func (c *RestoreItemActionGRPCClient) Execute(input *velero.RestoreItemActionExe warning = errors.New(res.Warning) } + var additionalItems []velero.ResourceIdentifier + for _, itm := range res.AdditionalItems { + newItem := velero.ResourceIdentifier{ + GroupResource: schema.GroupResource{ + Group: itm.Group, + Resource: itm.Resource, + }, + Namespace: itm.Namespace, + Name: itm.Name, + } + + additionalItems = append(additionalItems, newItem) + } + return &velero.RestoreItemActionExecuteOutput{ - UpdatedItem: &updatedItem, - Warning: warning, + UpdatedItem: &updatedItem, + Warning: warning, + AdditionalItems: additionalItems, }, nil } diff --git a/pkg/plugin/framework/restore_item_action_server.go b/pkg/plugin/framework/restore_item_action_server.go index c15b6f468..8a62630ee 100644 --- a/pkg/plugin/framework/restore_item_action_server.go +++ b/pkg/plugin/framework/restore_item_action_server.go @@ -74,7 +74,7 @@ func (s *RestoreItemActionGRPCServer) AppliesTo(ctx context.Context, req *proto. 
}, nil } -func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.RestoreExecuteRequest) (response *proto.RestoreExecuteResponse, err error) { +func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.RestoreItemActionExecuteRequest) (response *proto.RestoreItemActionExecuteResponse, err error) { defer func() { if recoveredErr := handlePanic(recover()); recoveredErr != nil { err = recoveredErr @@ -113,9 +113,16 @@ func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.Re return nil, newGRPCError(err) } - updatedItem, err := json.Marshal(executeOutput.UpdatedItem) - if err != nil { - return nil, newGRPCError(errors.WithStack(err)) + // If the plugin implementation returned a nil updatedItem (meaning no modifications), reset updatedItem to the + // original item. + var updatedItemJSON []byte + if executeOutput.UpdatedItem == nil { + updatedItemJSON = req.Item + } else { + updatedItemJSON, err = json.Marshal(executeOutput.UpdatedItem.UnstructuredContent()) + if err != nil { + return nil, newGRPCError(errors.WithStack(err)) + } } var warnMessage string @@ -123,8 +130,23 @@ func (s *RestoreItemActionGRPCServer) Execute(ctx context.Context, req *proto.Re warnMessage = executeOutput.Warning.Error() } - return &proto.RestoreExecuteResponse{ - Item: updatedItem, + res := &proto.RestoreItemActionExecuteResponse{ + Item: updatedItemJSON, Warning: warnMessage, - }, nil + } + + for _, item := range executeOutput.AdditionalItems { + res.AdditionalItems = append(res.AdditionalItems, restoreResourceIdentifierToProto(item)) + } + + return res, nil +} + +func restoreResourceIdentifierToProto(id velero.ResourceIdentifier) *proto.ResourceIdentifier { + return &proto.ResourceIdentifier{ + Group: id.Group, + Resource: id.Resource, + Namespace: id.Namespace, + Name: id.Name, + } } diff --git a/pkg/plugin/generated/BackupItemAction.pb.go b/pkg/plugin/generated/BackupItemAction.pb.go index 13c7b3272..39b5279f0 100644 --- a/pkg/plugin/generated/BackupItemAction.pb.go +++ b/pkg/plugin/generated/BackupItemAction.pb.go @@ -15,7 +15,6 @@ It is generated from these files: It has these top-level messages: ExecuteRequest ExecuteResponse - ResourceIdentifier PutObjectRequest GetObjectRequest Bytes @@ -28,14 +27,15 @@ It has these top-level messages: CreateSignedURLResponse PluginIdentifier ListPluginsResponse - RestoreExecuteRequest - RestoreExecuteResponse + RestoreItemActionExecuteRequest + RestoreItemActionExecuteResponse Empty InitRequest AppliesToRequest AppliesToResponse Stack StackFrame + ResourceIdentifier CreateVolumeRequest CreateVolumeResponse GetVolumeInfoRequest @@ -126,50 +126,9 @@ func (m *ExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { return nil } -type ResourceIdentifier struct { - Group string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` - Resource string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` - Namespace string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` - Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` -} - -func (m *ResourceIdentifier) Reset() { *m = ResourceIdentifier{} } -func (m *ResourceIdentifier) String() string { return proto.CompactTextString(m) } -func (*ResourceIdentifier) ProtoMessage() {} -func (*ResourceIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -func (m *ResourceIdentifier) GetGroup() string { - if m != nil { - return m.Group - } - return "" -} - -func (m *ResourceIdentifier) GetResource()
string { - if m != nil { - return m.Resource - } - return "" -} - -func (m *ResourceIdentifier) GetNamespace() string { - if m != nil { - return m.Namespace - } - return "" -} - -func (m *ResourceIdentifier) GetName() string { - if m != nil { - return m.Name - } - return "" -} - func init() { proto.RegisterType((*ExecuteRequest)(nil), "generated.ExecuteRequest") proto.RegisterType((*ExecuteResponse)(nil), "generated.ExecuteResponse") - proto.RegisterType((*ResourceIdentifier)(nil), "generated.ResourceIdentifier") } // Reference imports to suppress errors if they are not otherwise used. @@ -280,24 +239,21 @@ var _BackupItemAction_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("BackupItemAction.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 298 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x51, 0x4d, 0x4f, 0x02, 0x31, - 0x10, 0xcd, 0x02, 0xa2, 0x1d, 0x89, 0x98, 0xc6, 0x90, 0x75, 0xc5, 0x84, 0x70, 0xe2, 0xc4, 0x01, - 0xff, 0x80, 0x98, 0xa8, 0xe1, 0x5a, 0xf9, 0x03, 0x65, 0x77, 0xc4, 0xc6, 0xdd, 0xb6, 0xf6, 0x23, - 0xe1, 0xc7, 0xf8, 0x63, 0xcd, 0x76, 0x9b, 0x75, 0x45, 0x6e, 0x7d, 0xf3, 0xde, 0x4c, 0xe7, 0xbd, - 0x81, 0xc9, 0x13, 0xcf, 0x3f, 0xbd, 0xde, 0x38, 0xac, 0xd6, 0xb9, 0x13, 0x4a, 0x2e, 0xb5, 0x51, - 0x4e, 0x51, 0xb2, 0x47, 0x89, 0x86, 0x3b, 0x2c, 0xb2, 0xd1, 0xdb, 0x07, 0x37, 0x58, 0x34, 0xc4, - 0x7c, 0x0b, 0x57, 0xcf, 0x07, 0xcc, 0xbd, 0x43, 0x86, 0x5f, 0x1e, 0xad, 0xa3, 0x13, 0x18, 0xea, - 0xd2, 0xef, 0x85, 0x4c, 0x93, 0x59, 0xb2, 0x20, 0x2c, 0x22, 0x4a, 0x61, 0x20, 0x1c, 0x56, 0x69, - 0x6f, 0x96, 0x2c, 0x46, 0x2c, 0xbc, 0x6b, 0xed, 0x2e, 0x7c, 0x98, 0xf6, 0x43, 0x35, 0xa2, 0xb9, - 0x84, 0x71, 0x3b, 0xd5, 0x6a, 0x25, 0x2d, 0xb6, 0xed, 0x49, 0xa7, 0xfd, 0x15, 0xc6, 0xbc, 0x28, - 0x44, 0xbd, 0x27, 0x2f, 0xeb, 0x9d, 0x6d, 0xda, 0x9b, 0xf5, 0x17, 0x97, 0xab, 0xfb, 0x65, 0xbb, - 0xef, 0x92, 0xa1, 0x55, 0xde, 0xe4, 0xb8, 0x29, 0x50, 0x3a, 0xf1, 0x2e, 0xd0, 0xb0, 0xe3, 0xae, - 0xf9, 0x01, 0xe8, 0x7f, 0x19, 0xbd, 0x81, 0xb3, 0xbd, 0x51, 0x5e, 0x47, 0x23, 0x0d, 0xa0, 0x19, - 0x5c, 0x98, 0xa8, 0x0d, 0x5e, 0x08, 0x6b, 0x31, 0x9d, 0x02, 0x91, 0xbc, 0x42, 0xab, 0x79, 0x8e, - 0xc1, 0x12, 0x61, 0xbf, 0x85, 0xda, 0x42, 0x0d, 0xd2, 0x41, 0x20, 0xc2, 0x7b, 0xf5, 0x9d, 0xc0, - 0xf5, 0x71, 0xe6, 0xf4, 0x05, 0xc8, 0x5a, 0xeb, 0x52, 0xa0, 0xdd, 0x2a, 0x7a, 0xd7, 0xf1, 0xd2, - 0x56, 0x63, 0xd8, 0xd9, 0xf4, 0x34, 0x19, 0x33, 0x7b, 0x84, 0xf3, 0x18, 0x23, 0xbd, 0xed, 0x08, - 0xff, 0x1e, 0x2c, 0xcb, 0x4e, 0x51, 0xcd, 0x84, 0xdd, 0x30, 0x5c, 0xf9, 0xe1, 0x27, 0x00, 0x00, - 0xff, 0xff, 0x56, 0x1b, 0x70, 0xd6, 0x18, 0x02, 0x00, 0x00, + // 250 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x50, 0xcb, 0x4e, 0xc3, 0x30, + 0x10, 0x94, 0x5b, 0x54, 0x94, 0xa5, 0xa2, 0xc8, 0x87, 0x2a, 0x04, 0x90, 0xa2, 0x9e, 0x72, 0xca, + 0xa1, 0xfc, 0x00, 0x45, 0x02, 0xd4, 0xab, 0xe9, 0x0f, 0xb8, 0xf1, 0x52, 0x2c, 0x52, 0xdb, 0xf8, + 0x21, 0xf1, 0x31, 0x7c, 0x2c, 0x4a, 0x6a, 0x59, 0xa1, 0xca, 0xcd, 0xbb, 0xb3, 0x33, 0x9e, 0x19, + 0x58, 0x3e, 0xf3, 0xe6, 0x2b, 0x98, 0xad, 0xc7, 0xe3, 0xa6, 0xf1, 0x52, 0xab, 0xda, 0x58, 0xed, + 0x35, 0xcd, 0x0e, 0xa8, 0xd0, 0x72, 0x8f, 0xa2, 0x98, 0xbf, 0x7f, 0x72, 0x8b, 0xe2, 0x04, 0xac, + 0x76, 0x70, 0xfd, 0xf2, 0x83, 0x4d, 0xf0, 0xc8, 0xf0, 0x3b, 0xa0, 0xf3, 0x74, 0x09, 0x33, 0xd3, + 0x86, 0x83, 0x54, 0x39, 0x29, 0x49, 0x95, 0xb1, 0x38, 0x51, 0x0a, 0x17, 0xd2, 0xe3, 0x31, 0x9f, + 0x94, 0xa4, 0x9a, 0xb3, 0xfe, 0xdd, 0xdd, 0xee, 0xfb, 0x0f, 0xf3, 
0x69, 0xbf, 0x8d, 0xd3, 0x4a, + 0xc1, 0x22, 0xa9, 0x3a, 0xa3, 0x95, 0xc3, 0x44, 0x27, 0x03, 0xfa, 0x1b, 0x2c, 0xb8, 0x10, 0xb2, + 0xf3, 0xc9, 0xdb, 0xce, 0xb3, 0xcb, 0x27, 0xe5, 0xb4, 0xba, 0x5a, 0x3f, 0xd4, 0xc9, 0x6f, 0xcd, + 0xd0, 0xe9, 0x60, 0x1b, 0xdc, 0x0a, 0x54, 0x5e, 0x7e, 0x48, 0xb4, 0xec, 0x9c, 0xb5, 0xfe, 0x25, + 0x70, 0x73, 0x9e, 0x9c, 0xbe, 0x42, 0xb6, 0x31, 0xa6, 0x95, 0xe8, 0x76, 0x9a, 0xde, 0x0d, 0x14, + 0xd3, 0x36, 0x46, 0x2e, 0xee, 0xc7, 0xc1, 0xe8, 0xfc, 0x09, 0x2e, 0x63, 0x18, 0x7a, 0x3b, 0x38, + 0xfc, 0x5f, 0x5b, 0x51, 0x8c, 0x41, 0x27, 0x85, 0xfd, 0xac, 0xef, 0xfa, 0xf1, 0x2f, 0x00, 0x00, + 0xff, 0xff, 0x3e, 0xb0, 0x7f, 0x0f, 0x9e, 0x01, 0x00, 0x00, } diff --git a/pkg/plugin/generated/RestoreItemAction.pb.go b/pkg/plugin/generated/RestoreItemAction.pb.go index b97b7b611..3467b7429 100644 --- a/pkg/plugin/generated/RestoreItemAction.pb.go +++ b/pkg/plugin/generated/RestoreItemAction.pb.go @@ -17,73 +17,83 @@ var _ = proto.Marshal var _ = fmt.Errorf var _ = math.Inf -type RestoreExecuteRequest struct { +type RestoreItemActionExecuteRequest struct { Plugin string `protobuf:"bytes,1,opt,name=plugin" json:"plugin,omitempty"` Item []byte `protobuf:"bytes,2,opt,name=item,proto3" json:"item,omitempty"` Restore []byte `protobuf:"bytes,3,opt,name=restore,proto3" json:"restore,omitempty"` ItemFromBackup []byte `protobuf:"bytes,4,opt,name=itemFromBackup,proto3" json:"itemFromBackup,omitempty"` } -func (m *RestoreExecuteRequest) Reset() { *m = RestoreExecuteRequest{} } -func (m *RestoreExecuteRequest) String() string { return proto.CompactTextString(m) } -func (*RestoreExecuteRequest) ProtoMessage() {} -func (*RestoreExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } +func (m *RestoreItemActionExecuteRequest) Reset() { *m = RestoreItemActionExecuteRequest{} } +func (m *RestoreItemActionExecuteRequest) String() string { return proto.CompactTextString(m) } +func (*RestoreItemActionExecuteRequest) ProtoMessage() {} +func (*RestoreItemActionExecuteRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } -func (m *RestoreExecuteRequest) GetPlugin() string { +func (m *RestoreItemActionExecuteRequest) GetPlugin() string { if m != nil { return m.Plugin } return "" } -func (m *RestoreExecuteRequest) GetItem() []byte { +func (m *RestoreItemActionExecuteRequest) GetItem() []byte { if m != nil { return m.Item } return nil } -func (m *RestoreExecuteRequest) GetRestore() []byte { +func (m *RestoreItemActionExecuteRequest) GetRestore() []byte { if m != nil { return m.Restore } return nil } -func (m *RestoreExecuteRequest) GetItemFromBackup() []byte { +func (m *RestoreItemActionExecuteRequest) GetItemFromBackup() []byte { if m != nil { return m.ItemFromBackup } return nil } -type RestoreExecuteResponse struct { - Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` - Warning string `protobuf:"bytes,2,opt,name=warning" json:"warning,omitempty"` +type RestoreItemActionExecuteResponse struct { + Item []byte `protobuf:"bytes,1,opt,name=item,proto3" json:"item,omitempty"` + Warning string `protobuf:"bytes,2,opt,name=warning" json:"warning,omitempty"` + AdditionalItems []*ResourceIdentifier `protobuf:"bytes,3,rep,name=additionalItems" json:"additionalItems,omitempty"` } -func (m *RestoreExecuteResponse) Reset() { *m = RestoreExecuteResponse{} } -func (m *RestoreExecuteResponse) String() string { return proto.CompactTextString(m) } -func (*RestoreExecuteResponse) ProtoMessage() {} -func (*RestoreExecuteResponse) Descriptor() 
([]byte, []int) { return fileDescriptor3, []int{1} } +func (m *RestoreItemActionExecuteResponse) Reset() { *m = RestoreItemActionExecuteResponse{} } +func (m *RestoreItemActionExecuteResponse) String() string { return proto.CompactTextString(m) } +func (*RestoreItemActionExecuteResponse) ProtoMessage() {} +func (*RestoreItemActionExecuteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor3, []int{1} +} -func (m *RestoreExecuteResponse) GetItem() []byte { +func (m *RestoreItemActionExecuteResponse) GetItem() []byte { if m != nil { return m.Item } return nil } -func (m *RestoreExecuteResponse) GetWarning() string { +func (m *RestoreItemActionExecuteResponse) GetWarning() string { if m != nil { return m.Warning } return "" } +func (m *RestoreItemActionExecuteResponse) GetAdditionalItems() []*ResourceIdentifier { + if m != nil { + return m.AdditionalItems + } + return nil +} + func init() { - proto.RegisterType((*RestoreExecuteRequest)(nil), "generated.RestoreExecuteRequest") - proto.RegisterType((*RestoreExecuteResponse)(nil), "generated.RestoreExecuteResponse") + proto.RegisterType((*RestoreItemActionExecuteRequest)(nil), "generated.RestoreItemActionExecuteRequest") + proto.RegisterType((*RestoreItemActionExecuteResponse)(nil), "generated.RestoreItemActionExecuteResponse") } // Reference imports to suppress errors if they are not otherwise used. @@ -98,7 +108,7 @@ const _ = grpc.SupportPackageIsVersion4 type RestoreItemActionClient interface { AppliesTo(ctx context.Context, in *AppliesToRequest, opts ...grpc.CallOption) (*AppliesToResponse, error) - Execute(ctx context.Context, in *RestoreExecuteRequest, opts ...grpc.CallOption) (*RestoreExecuteResponse, error) + Execute(ctx context.Context, in *RestoreItemActionExecuteRequest, opts ...grpc.CallOption) (*RestoreItemActionExecuteResponse, error) } type restoreItemActionClient struct { @@ -118,8 +128,8 @@ func (c *restoreItemActionClient) AppliesTo(ctx context.Context, in *AppliesToRe return out, nil } -func (c *restoreItemActionClient) Execute(ctx context.Context, in *RestoreExecuteRequest, opts ...grpc.CallOption) (*RestoreExecuteResponse, error) { - out := new(RestoreExecuteResponse) +func (c *restoreItemActionClient) Execute(ctx context.Context, in *RestoreItemActionExecuteRequest, opts ...grpc.CallOption) (*RestoreItemActionExecuteResponse, error) { + out := new(RestoreItemActionExecuteResponse) err := grpc.Invoke(ctx, "/generated.RestoreItemAction/Execute", in, out, c.cc, opts...) 
if err != nil { return nil, err @@ -131,7 +141,7 @@ func (c *restoreItemActionClient) Execute(ctx context.Context, in *RestoreExecut type RestoreItemActionServer interface { AppliesTo(context.Context, *AppliesToRequest) (*AppliesToResponse, error) - Execute(context.Context, *RestoreExecuteRequest) (*RestoreExecuteResponse, error) + Execute(context.Context, *RestoreItemActionExecuteRequest) (*RestoreItemActionExecuteResponse, error) } func RegisterRestoreItemActionServer(s *grpc.Server, srv RestoreItemActionServer) { @@ -157,7 +167,7 @@ func _RestoreItemAction_AppliesTo_Handler(srv interface{}, ctx context.Context, } func _RestoreItemAction_Execute_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RestoreExecuteRequest) + in := new(RestoreItemActionExecuteRequest) if err := dec(in); err != nil { return nil, err } @@ -169,7 +179,7 @@ func _RestoreItemAction_Execute_Handler(srv interface{}, ctx context.Context, de FullMethod: "/generated.RestoreItemAction/Execute", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(RestoreItemActionServer).Execute(ctx, req.(*RestoreExecuteRequest)) + return srv.(RestoreItemActionServer).Execute(ctx, req.(*RestoreItemActionExecuteRequest)) } return interceptor(ctx, in, info, handler) } @@ -194,21 +204,24 @@ var _RestoreItemAction_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("RestoreItemAction.proto", fileDescriptor3) } var fileDescriptor3 = []byte{ - // 252 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x51, 0x4b, 0x4e, 0xc3, 0x30, - 0x14, 0x94, 0xa1, 0x6a, 0x95, 0xa7, 0x0a, 0x89, 0x27, 0x51, 0xac, 0xc0, 0x22, 0x74, 0x81, 0xba, - 0xca, 0x02, 0x4e, 0x50, 0x24, 0x22, 0xb1, 0x61, 0x61, 0xb8, 0x40, 0x48, 0x9f, 0x82, 0x45, 0x63, - 0x1b, 0x7f, 0x04, 0x17, 0xe0, 0x34, 0x5c, 0x12, 0xe1, 0xb8, 0x11, 0x94, 0x88, 0x9d, 0x67, 0xde, - 0xf8, 0xcd, 0x78, 0x0c, 0xa7, 0x82, 0x9c, 0xd7, 0x96, 0xee, 0x3c, 0x75, 0xeb, 0xc6, 0x4b, 0xad, - 0x4a, 0x63, 0xb5, 0xd7, 0x98, 0xb5, 0xa4, 0xc8, 0xd6, 0x9e, 0x36, 0xf9, 0xfc, 0xe1, 0xb9, 0xb6, - 0xb4, 0xe9, 0x07, 0xcb, 0x0f, 0x06, 0x27, 0xe9, 0xd2, 0xed, 0x3b, 0x35, 0xc1, 0x93, 0xa0, 0xd7, - 0x40, 0xce, 0xe3, 0x02, 0xa6, 0x66, 0x1b, 0x5a, 0xa9, 0x38, 0x2b, 0xd8, 0x2a, 0x13, 0x09, 0x21, - 0xc2, 0x44, 0x7a, 0xea, 0xf8, 0x41, 0xc1, 0x56, 0x73, 0x11, 0xcf, 0xc8, 0x61, 0x66, 0xfb, 0x25, - 0xfc, 0x30, 0xd2, 0x3b, 0x88, 0x97, 0x70, 0xf4, 0xad, 0xa8, 0xac, 0xee, 0x6e, 0xea, 0xe6, 0x25, - 0x18, 0x3e, 0x89, 0x82, 0x3d, 0x76, 0x59, 0xc1, 0x62, 0x3f, 0x86, 0x33, 0x5a, 0x39, 0x1a, 0xfc, - 0xd8, 0x6f, 0xbf, 0xb7, 0xda, 0x2a, 0xa9, 0xda, 0x18, 0x23, 0x13, 0x3b, 0x78, 0xf5, 0xc9, 0xe0, - 0xf8, 0x4f, 0x09, 0x58, 0x41, 0xb6, 0x36, 0x66, 0x2b, 0xc9, 0x3d, 0x6a, 0x3c, 0x2b, 0x87, 0x32, - 0xca, 0x81, 0x4d, 0xaf, 0xce, 0xcf, 0xc7, 0x87, 0x29, 0xcb, 0x3d, 0xcc, 0x52, 0x3c, 0x2c, 0x7e, - 0x08, 0x47, 0x0b, 0xcc, 0x2f, 0xfe, 0x51, 0xf4, 0xfb, 0x9e, 0xa6, 0xf1, 0x13, 0xae, 0xbf, 0x02, - 0x00, 0x00, 0xff, 0xff, 0x19, 0xeb, 0x64, 0x57, 0xb8, 0x01, 0x00, 0x00, + // 293 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x51, 0x4d, 0x4e, 0xf3, 0x30, + 0x10, 0x95, 0xbf, 0x56, 0xad, 0x32, 0x5f, 0x05, 0xc2, 0x0b, 0xb0, 0x02, 0x88, 0xa8, 0x0b, 0x14, + 0x81, 0x94, 0x45, 0x39, 0x41, 0x91, 0x28, 0xea, 0xd6, 0x70, 0x01, 0x37, 0x19, 0x82, 0x45, 0x62, + 0x1b, 0xdb, 0x11, 0xdc, 0x82, 0x35, 0xa7, 0xe1, 0x6a, 
0x28, 0x3f, 0x8d, 0x68, 0x0b, 0x88, 0x9d, + 0xe7, 0xf9, 0xcd, 0x7b, 0x6f, 0x66, 0xe0, 0x88, 0xa3, 0xf3, 0xda, 0xe2, 0xd2, 0x63, 0x39, 0x4f, + 0xbd, 0xd4, 0x2a, 0x31, 0x56, 0x7b, 0x4d, 0x83, 0x1c, 0x15, 0x5a, 0xe1, 0x31, 0x0b, 0x27, 0x77, + 0x8f, 0xc2, 0x62, 0xd6, 0x7e, 0x4c, 0xdf, 0x08, 0x9c, 0xed, 0x34, 0xdd, 0xbc, 0x62, 0x5a, 0x79, + 0xe4, 0xf8, 0x5c, 0xa1, 0xf3, 0xf4, 0x10, 0x46, 0xa6, 0xa8, 0x72, 0xa9, 0x18, 0x89, 0x48, 0x1c, + 0xf0, 0xae, 0xa2, 0x14, 0x86, 0xd2, 0x63, 0xc9, 0xfe, 0x45, 0x24, 0x9e, 0xf0, 0xe6, 0x4d, 0x19, + 0x8c, 0x6d, 0x2b, 0xc7, 0x06, 0x0d, 0xbc, 0x2e, 0xe9, 0x39, 0xec, 0xd5, 0x8c, 0x85, 0xd5, 0xe5, + 0xb5, 0x48, 0x9f, 0x2a, 0xc3, 0x86, 0x0d, 0x61, 0x0b, 0x9d, 0xbe, 0x13, 0x88, 0x7e, 0x4e, 0xe4, + 0x8c, 0x56, 0x0e, 0x7b, 0x6b, 0xb2, 0x69, 0xfd, 0x22, 0xac, 0x92, 0x2a, 0x6f, 0x12, 0x05, 0x7c, + 0x5d, 0xd2, 0x5b, 0xd8, 0x17, 0x59, 0x26, 0x6b, 0x21, 0x51, 0xd4, 0xa2, 0x8e, 0x0d, 0xa2, 0x41, + 0xfc, 0x7f, 0x76, 0x9a, 0xf4, 0x7b, 0x49, 0x38, 0x3a, 0x5d, 0xd9, 0x14, 0x97, 0x19, 0x2a, 0x2f, + 0x1f, 0x24, 0x5a, 0xbe, 0xdd, 0x35, 0xfb, 0x20, 0x70, 0xb0, 0x93, 0x8d, 0x2e, 0x20, 0x98, 0x1b, + 0x53, 0x48, 0x74, 0xf7, 0x9a, 0x1e, 0x7f, 0x91, 0xec, 0xd1, 0x6e, 0x93, 0xe1, 0xc9, 0xf7, 0x9f, + 0xdd, 0x50, 0x2b, 0x18, 0x77, 0x73, 0xd2, 0x8b, 0xcd, 0x60, 0xbf, 0x9d, 0x27, 0xbc, 0xfc, 0x13, + 0xb7, 0xf5, 0x58, 0x8d, 0x9a, 0xb3, 0x5f, 0x7d, 0x06, 0x00, 0x00, 0xff, 0xff, 0x18, 0xd0, 0x19, + 0x9b, 0x2a, 0x02, 0x00, 0x00, } diff --git a/pkg/plugin/generated/Shared.pb.go b/pkg/plugin/generated/Shared.pb.go index c0085d5dc..983229f2a 100644 --- a/pkg/plugin/generated/Shared.pb.go +++ b/pkg/plugin/generated/Shared.pb.go @@ -156,6 +156,46 @@ func (m *StackFrame) GetFunction() string { return "" } +type ResourceIdentifier struct { + Group string `protobuf:"bytes,1,opt,name=group" json:"group,omitempty"` + Resource string `protobuf:"bytes,2,opt,name=resource" json:"resource,omitempty"` + Namespace string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"` + Name string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"` +} + +func (m *ResourceIdentifier) Reset() { *m = ResourceIdentifier{} } +func (m *ResourceIdentifier) String() string { return proto.CompactTextString(m) } +func (*ResourceIdentifier) ProtoMessage() {} +func (*ResourceIdentifier) Descriptor() ([]byte, []int) { return fileDescriptor4, []int{6} } + +func (m *ResourceIdentifier) GetGroup() string { + if m != nil { + return m.Group + } + return "" +} + +func (m *ResourceIdentifier) GetResource() string { + if m != nil { + return m.Resource + } + return "" +} + +func (m *ResourceIdentifier) GetNamespace() string { + if m != nil { + return m.Namespace + } + return "" +} + +func (m *ResourceIdentifier) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func init() { proto.RegisterType((*Empty)(nil), "generated.Empty") proto.RegisterType((*InitRequest)(nil), "generated.InitRequest") @@ -163,32 +203,36 @@ func init() { proto.RegisterType((*AppliesToResponse)(nil), "generated.AppliesToResponse") proto.RegisterType((*Stack)(nil), "generated.Stack") proto.RegisterType((*StackFrame)(nil), "generated.StackFrame") + proto.RegisterType((*ResourceIdentifier)(nil), "generated.ResourceIdentifier") } func init() { proto.RegisterFile("Shared.proto", fileDescriptor4) } var fileDescriptor4 = []byte{ - // 345 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0x4a, 0xeb, 0x40, - 0x14, 0x86, 0x49, 0xd2, 0xe4, 0xde, 0x9e, 0xdc, 0x45, 0x3b, 0x5c, 
0x25, 0x74, 0x55, 0xb2, 0x2a, - 0xa2, 0x59, 0x54, 0x10, 0xed, 0x4e, 0xa4, 0x82, 0x1b, 0x91, 0xd4, 0x17, 0x88, 0xc9, 0x49, 0x1d, - 0x9a, 0xce, 0x8c, 0x33, 0x13, 0xb1, 0xef, 0xe2, 0x1b, 0xfa, 0x12, 0x32, 0x93, 0xb4, 0x0d, 0x44, - 0x70, 0x77, 0xce, 0xff, 0x7f, 0xf3, 0xf3, 0x4f, 0x26, 0xf0, 0x6f, 0xf5, 0x9a, 0x49, 0x2c, 0x12, - 0x21, 0xb9, 0xe6, 0x64, 0xb8, 0x46, 0x86, 0x32, 0xd3, 0x58, 0xc4, 0x7f, 0xc0, 0x5f, 0x6e, 0x85, - 0xde, 0xc5, 0x9f, 0x0e, 0x84, 0x0f, 0x8c, 0xea, 0x14, 0xdf, 0x6a, 0x54, 0x9a, 0x9c, 0x42, 0x20, - 0xaa, 0x7a, 0x4d, 0x59, 0xe4, 0x4c, 0x9d, 0xd9, 0x30, 0x6d, 0x37, 0xb2, 0x80, 0x20, 0xe7, 0xac, - 0xa4, 0xeb, 0xc8, 0x9d, 0x7a, 0xb3, 0x70, 0x1e, 0x27, 0x87, 0xb0, 0xa4, 0x73, 0x3e, 0xb9, 0xb3, - 0xd0, 0x92, 0x69, 0xb9, 0x4b, 0xdb, 0x13, 0x93, 0x1b, 0x08, 0x3b, 0x32, 0x19, 0x81, 0xb7, 0xc1, - 0x5d, 0x9b, 0x6f, 0x46, 0xf2, 0x1f, 0xfc, 0xf7, 0xac, 0xaa, 0x31, 0x72, 0xad, 0xd6, 0x2c, 0x0b, - 0xf7, 0xda, 0x89, 0xcf, 0x60, 0x74, 0x2b, 0x44, 0x45, 0x51, 0x3d, 0xf3, 0x5f, 0x2a, 0xc6, 0x5f, - 0x0e, 0x8c, 0x3b, 0xb0, 0x12, 0x9c, 0x29, 0x24, 0x09, 0x10, 0xca, 0xf2, 0xaa, 0x2e, 0xb0, 0x78, - 0xcc, 0xb6, 0xa8, 0x44, 0x96, 0xa3, 0x8a, 0x9c, 0xa9, 0x37, 0x1b, 0xa6, 0x3f, 0x38, 0x86, 0xc7, - 0x8f, 0x1e, 0xef, 0x36, 0x7c, 0xdf, 0x21, 0xe7, 0x30, 0xde, 0xa7, 0xa4, 0xa8, 0x78, 0x2d, 0x0d, - 0xee, 0x59, 0xbc, 0x6f, 0x18, 0x7a, 0x9f, 0x71, 0xa4, 0x07, 0x0d, 0xdd, 0x33, 0xc8, 0x04, 0xfe, - 0x2a, 0xac, 0x30, 0xd7, 0x5c, 0x46, 0xbe, 0xbd, 0xeb, 0x61, 0x8f, 0xaf, 0xc0, 0x5f, 0xe9, 0x2c, - 0xdf, 0x90, 0x0b, 0x08, 0x4a, 0x69, 0xfa, 0xd8, 0x4b, 0x85, 0xf3, 0x93, 0xce, 0xcb, 0x58, 0xe2, - 0xde, 0xb8, 0x69, 0x0b, 0xc5, 0x4f, 0x00, 0x47, 0x95, 0x10, 0x18, 0x94, 0xb4, 0xc2, 0xf6, 0x4b, - 0xda, 0xd9, 0x68, 0x15, 0x65, 0xcd, 0x63, 0xf8, 0xa9, 0x9d, 0x4d, 0x93, 0xb2, 0x66, 0xb9, 0xa6, - 0x9c, 0x45, 0x5e, 0xd3, 0x64, 0xbf, 0xbf, 0x04, 0xf6, 0xef, 0xba, 0xfc, 0x0e, 0x00, 0x00, 0xff, - 0xff, 0x2c, 0x72, 0x6a, 0xd8, 0x6d, 0x02, 0x00, 0x00, + // 392 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xc1, 0xaa, 0xd4, 0x30, + 0x14, 0x86, 0x69, 0x3b, 0xad, 0xf6, 0xd4, 0xc5, 0xbd, 0x41, 0xa5, 0x5c, 0x5c, 0x0c, 0x59, 0x0d, + 0xa2, 0x5d, 0x5c, 0x41, 0xf4, 0xee, 0x44, 0x46, 0x98, 0x8d, 0x48, 0xc7, 0x17, 0xa8, 0xed, 0x69, + 0x0d, 0xd3, 0x49, 0x62, 0x92, 0xca, 0xcc, 0xbb, 0xf8, 0x86, 0xbe, 0x84, 0x24, 0x4d, 0x67, 0x0a, + 0x15, 0xee, 0xee, 0xfc, 0xe7, 0x7c, 0xf9, 0xfb, 0x27, 0xa7, 0xf0, 0x6c, 0xff, 0xb3, 0x52, 0xd8, + 0x14, 0x52, 0x09, 0x23, 0x48, 0xda, 0x21, 0x47, 0x55, 0x19, 0x6c, 0xe8, 0x13, 0x88, 0xb7, 0x47, + 0x69, 0xce, 0xf4, 0x4f, 0x00, 0xd9, 0x8e, 0x33, 0x53, 0xe2, 0xaf, 0x01, 0xb5, 0x21, 0x2f, 0x21, + 0x91, 0xfd, 0xd0, 0x31, 0x9e, 0x07, 0xeb, 0x60, 0x93, 0x96, 0x5e, 0x91, 0x07, 0x48, 0x6a, 0xc1, + 0x5b, 0xd6, 0xe5, 0xe1, 0x3a, 0xda, 0x64, 0xf7, 0xb4, 0xb8, 0x98, 0x15, 0xb3, 0xf3, 0xc5, 0x67, + 0x07, 0x6d, 0xb9, 0x51, 0xe7, 0xd2, 0x9f, 0xb8, 0xfb, 0x08, 0xd9, 0xac, 0x4d, 0x6e, 0x20, 0x3a, + 0xe0, 0xd9, 0xfb, 0xdb, 0x92, 0x3c, 0x87, 0xf8, 0x77, 0xd5, 0x0f, 0x98, 0x87, 0xae, 0x37, 0x8a, + 0x87, 0xf0, 0x43, 0x40, 0x5f, 0xc3, 0xcd, 0x27, 0x29, 0x7b, 0x86, 0xfa, 0xbb, 0x78, 0x24, 0x22, + 0xfd, 0x1b, 0xc0, 0xed, 0x0c, 0xd6, 0x52, 0x70, 0x8d, 0xa4, 0x00, 0xc2, 0x78, 0xdd, 0x0f, 0x0d, + 0x36, 0x5f, 0xab, 0x23, 0x6a, 0x59, 0xd5, 0xa8, 0xf3, 0x60, 0x1d, 0x6d, 0xd2, 0xf2, 0x3f, 0x13, + 0xcb, 0xe3, 0x69, 0xc1, 0x87, 0x23, 0xbf, 0x9c, 0x90, 0x37, 0x70, 0x3b, 0xb9, 0x94, 0xa8, 0xc5, + 0xa0, 0x2c, 0x1e, 0x39, 0x7c, 0x39, 0xb0, 0xf4, 0xe4, 0x71, 0xa5, 0x57, 0x23, 0xbd, 
0x18, 0x90, + 0x3b, 0x78, 0xaa, 0xb1, 0xc7, 0xda, 0x08, 0x95, 0xc7, 0xee, 0xae, 0x17, 0x4d, 0xdf, 0x43, 0xbc, + 0x37, 0x55, 0x7d, 0x20, 0x6f, 0x21, 0x69, 0x95, 0xcd, 0xe3, 0x2e, 0x95, 0xdd, 0xbf, 0x98, 0x6d, + 0xc6, 0x11, 0x5f, 0xec, 0xb4, 0xf4, 0x10, 0xfd, 0x06, 0x70, 0xed, 0x12, 0x02, 0xab, 0x96, 0xf5, + 0xe8, 0x5f, 0xd2, 0xd5, 0xb6, 0xd7, 0x33, 0x3e, 0x2e, 0x23, 0x2e, 0x5d, 0x6d, 0x93, 0xb4, 0x03, + 0xaf, 0x0d, 0x13, 0x3c, 0x8f, 0xc6, 0x24, 0x93, 0xa6, 0x27, 0x20, 0x53, 0xe4, 0x5d, 0x83, 0xdc, + 0xb0, 0x96, 0xa1, 0xb2, 0x3b, 0xed, 0x94, 0x18, 0xa4, 0xb7, 0x1e, 0x85, 0xf5, 0x51, 0x9e, 0xf5, + 0xcb, 0xbe, 0x68, 0xf2, 0x0a, 0x52, 0x3e, 0xbd, 0xab, 0xff, 0xc8, 0xb5, 0x61, 0x53, 0x59, 0x91, + 0xaf, 0xc6, 0xa4, 0xb6, 0xfe, 0x91, 0xb8, 0xff, 0xfa, 0xdd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0x17, 0x0a, 0x8a, 0x0c, 0xe7, 0x02, 0x00, 0x00, } diff --git a/pkg/plugin/proto/BackupItemAction.proto b/pkg/plugin/proto/BackupItemAction.proto index a3d58a47a..0d795fa26 100644 --- a/pkg/plugin/proto/BackupItemAction.proto +++ b/pkg/plugin/proto/BackupItemAction.proto @@ -14,13 +14,6 @@ message ExecuteResponse { repeated ResourceIdentifier additionalItems = 2; } -message ResourceIdentifier { - string group = 1; - string resource = 2; - string namespace = 3; - string name = 4; -} - service BackupItemAction { rpc AppliesTo(AppliesToRequest) returns (AppliesToResponse); rpc Execute(ExecuteRequest) returns (ExecuteResponse); diff --git a/pkg/plugin/proto/RestoreItemAction.proto b/pkg/plugin/proto/RestoreItemAction.proto index 87c186565..24463f233 100644 --- a/pkg/plugin/proto/RestoreItemAction.proto +++ b/pkg/plugin/proto/RestoreItemAction.proto @@ -3,19 +3,20 @@ package generated; import "Shared.proto"; -message RestoreExecuteRequest { +message RestoreItemActionExecuteRequest { string plugin = 1; bytes item = 2; bytes restore = 3; bytes itemFromBackup = 4; } -message RestoreExecuteResponse { +message RestoreItemActionExecuteResponse { bytes item = 1; string warning = 2; + repeated ResourceIdentifier additionalItems = 3; } service RestoreItemAction { rpc AppliesTo(AppliesToRequest) returns (AppliesToResponse); - rpc Execute(RestoreExecuteRequest) returns (RestoreExecuteResponse); + rpc Execute(RestoreItemActionExecuteRequest) returns (RestoreItemActionExecuteResponse); } diff --git a/pkg/plugin/proto/Shared.proto b/pkg/plugin/proto/Shared.proto index fc96e9ba0..e593d6231 100644 --- a/pkg/plugin/proto/Shared.proto +++ b/pkg/plugin/proto/Shared.proto @@ -29,3 +29,10 @@ message StackFrame { int32 line = 2; string function = 3; } + +message ResourceIdentifier { + string group = 1; + string resource = 2; + string namespace = 3; + string name = 4; +} diff --git a/pkg/plugin/velero/restore_item_action.go b/pkg/plugin/velero/restore_item_action.go index 810b3158e..e8d8b5aec 100644 --- a/pkg/plugin/velero/restore_item_action.go +++ b/pkg/plugin/velero/restore_item_action.go @@ -31,7 +31,8 @@ type RestoreItemAction interface { // Execute allows the ItemAction to perform arbitrary logic with the item being restored, // including mutating the item itself prior to restore. The item (unmodified or modified) - // should be returned, along with a warning (which will be logged but will not prevent + // should be returned, along with an optional slice of ResourceIdentifiers specifying additional + // related items that should be restored, a warning (which will be logged but will not prevent // the item from being restored) or error (which will be logged and will prevent the item // from being restored) if applicable. 
Execute(input *RestoreItemActionExecuteInput) (*RestoreItemActionExecuteOutput, error) @@ -55,6 +56,9 @@ type RestoreItemActionExecuteOutput struct { // Warning is an exceptional message returned from ItemAction // which is not preventing the item from being restored. Warning error + // AdditionalItems is a list of additional related items that should + // be restored. + AdditionalItems []ResourceIdentifier } // NewRestoreItemActionExecuteOutput creates a new RestoreItemActionExecuteOutput diff --git a/pkg/restore/add_pv_from_pvc_action.go b/pkg/restore/add_pv_from_pvc_action.go new file mode 100644 index 000000000..a0503dbad --- /dev/null +++ b/pkg/restore/add_pv_from_pvc_action.go @@ -0,0 +1,71 @@ +/* +Copyright 2019 the Velero contributors. + + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/plugin/velero" +) + +type AddPVFromPVCAction struct { + logger logrus.FieldLogger +} + +func NewAddPVFromPVCAction(logger logrus.FieldLogger) *AddPVFromPVCAction { + return &AddPVFromPVCAction{logger: logger} +} + +func (a *AddPVFromPVCAction) AppliesTo() (velero.ResourceSelector, error) { + return velero.ResourceSelector{ + IncludedResources: []string{"persistentvolumeclaims"}, + }, nil +} + +func (a *AddPVFromPVCAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + a.logger.Info("Executing AddPVFromPVCAction") + + // use input.ItemFromBackup because we need to look at status fields, which have already been + // removed from input.Item + var pvc corev1api.PersistentVolumeClaim + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.ItemFromBackup.UnstructuredContent(), &pvc); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to persistent volume claim") + } + + // TODO: consolidate this logic in a helper function to share with backup_pv_action.go + if pvc.Status.Phase != corev1api.ClaimBound || pvc.Spec.VolumeName == "" { + a.logger.Info("PVC is not bound or its volume name is empty") + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + }, nil + } + + pv := velero.ResourceIdentifier{ + GroupResource: kuberesource.PersistentVolumes, + Name: pvc.Spec.VolumeName, + } + + a.logger.Infof("Adding PV %s as an additional item to restore", pvc.Spec.VolumeName) + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: []velero.ResourceIdentifier{pv}, + }, nil +} diff --git a/pkg/restore/add_pv_from_pvc_action_test.go b/pkg/restore/add_pv_from_pvc_action_test.go new file mode 100644 index 000000000..d9fcbd5e3 --- /dev/null +++ b/pkg/restore/add_pv_from_pvc_action_test.go @@ -0,0 +1,103 @@ +/* +Copyright 2019 the Velero contributors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/plugin/velero" + velerotest "github.com/heptio/velero/pkg/util/test" +) + +func TestAddPVFromPVCActionExecute(t *testing.T) { + tests := []struct { + name string + itemFromBackup *v1.PersistentVolumeClaim + want []velero.ResourceIdentifier + }{ + { + name: "bound PVC with volume name returns associated PV", + itemFromBackup: &v1.PersistentVolumeClaim{ + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "bound-pv", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + want: []velero.ResourceIdentifier{ + { + GroupResource: kuberesource.PersistentVolumes, + Name: "bound-pv", + }, + }, + }, + { + name: "unbound PVC with volume name does not return any additional items", + itemFromBackup: &v1.PersistentVolumeClaim{ + Spec: v1.PersistentVolumeClaimSpec{ + VolumeName: "pending-pv", + }, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimPending, + }, + }, + want: nil, + }, + { + name: "bound PVC without volume name does not return any additional items", + itemFromBackup: &v1.PersistentVolumeClaim{ + Spec: v1.PersistentVolumeClaimSpec{}, + Status: v1.PersistentVolumeClaimStatus{ + Phase: v1.ClaimBound, + }, + }, + want: nil, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + itemFromBackupData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(test.itemFromBackup) + require.NoError(t, err) + + itemData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(test.itemFromBackup) + require.NoError(t, err) + // item should have no status + delete(itemData, "status") + + action := &AddPVFromPVCAction{logger: velerotest.NewLogger()} + + input := &velero.RestoreItemActionExecuteInput{ + Item: &unstructured.Unstructured{Object: itemData}, + ItemFromBackup: &unstructured.Unstructured{Object: itemFromBackupData}, + } + + res, err := action.Execute(input) + require.NoError(t, err) + + assert.Equal(t, test.want, res.AdditionalItems) + }) + } +} diff --git a/pkg/restore/add_pvc_from_pod_action.go b/pkg/restore/add_pvc_from_pod_action.go new file mode 100644 index 000000000..11e740a31 --- /dev/null +++ b/pkg/restore/add_pvc_from_pod_action.go @@ -0,0 +1,70 @@ +/* +Copyright 2019 the Velero contributors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package restore + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + corev1api "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/plugin/velero" +) + +type AddPVCFromPodAction struct { + logger logrus.FieldLogger +} + +func NewAddPVCFromPodAction(logger logrus.FieldLogger) *AddPVCFromPodAction { + return &AddPVCFromPodAction{logger: logger} +} + +func (a *AddPVCFromPodAction) AppliesTo() (velero.ResourceSelector, error) { + return velero.ResourceSelector{ + IncludedResources: []string{"pods"}, + }, nil +} + +func (a *AddPVCFromPodAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) { + a.logger.Info("Executing AddPVCFromPodAction") + + var pod corev1api.Pod + if err := runtime.DefaultUnstructuredConverter.FromUnstructured(input.Item.UnstructuredContent(), &pod); err != nil { + return nil, errors.Wrap(err, "unable to convert unstructured item to pod") + } + + var additionalItems []velero.ResourceIdentifier + + for _, volume := range pod.Spec.Volumes { + if volume.PersistentVolumeClaim == nil { + continue + } + + a.logger.Infof("Adding PVC %s/%s as an additional item to restore", pod.Namespace, volume.PersistentVolumeClaim.ClaimName) + additionalItems = append(additionalItems, velero.ResourceIdentifier{ + GroupResource: kuberesource.PersistentVolumeClaims, + Namespace: pod.Namespace, + Name: volume.PersistentVolumeClaim.ClaimName, + }) + } + + return &velero.RestoreItemActionExecuteOutput{ + UpdatedItem: input.Item, + AdditionalItems: additionalItems, + }, nil +} diff --git a/pkg/restore/add_pvc_from_pod_action_test.go b/pkg/restore/add_pvc_from_pod_action_test.go new file mode 100644 index 000000000..11e574dd9 --- /dev/null +++ b/pkg/restore/add_pvc_from_pod_action_test.go @@ -0,0 +1,113 @@ +/* +Copyright 2019 the Velero contributors. + + Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package restore + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + + "github.com/heptio/velero/pkg/kuberesource" + "github.com/heptio/velero/pkg/plugin/velero" + velerotest "github.com/heptio/velero/pkg/util/test" +) + +func TestAddPVCFromPodActionExecute(t *testing.T) { + tests := []struct { + name string + item *v1.Pod + want []velero.ResourceIdentifier + }{ + { + name: "pod with no volumes returns no additional items", + item: &v1.Pod{}, + want: nil, + }, + { + name: "pod with some PVCs returns them as additional items", + item: &v1.Pod{ + ObjectMeta: metav1.ObjectMeta{ + Namespace: "ns-1", + Name: "foo", + }, + Spec: v1.PodSpec{ + Volumes: []v1.Volume{ + { + VolumeSource: v1.VolumeSource{ + EmptyDir: new(v1.EmptyDirVolumeSource), + }, + }, + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-1", + }, + }, + }, + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-2", + }, + }, + }, + { + VolumeSource: v1.VolumeSource{ + HostPath: new(v1.HostPathVolumeSource), + }, + }, + { + VolumeSource: v1.VolumeSource{ + PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ + ClaimName: "pvc-3", + }, + }, + }, + }, + }, + }, + want: []velero.ResourceIdentifier{ + {GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "ns-1", Name: "pvc-1"}, + {GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "ns-1", Name: "pvc-2"}, + {GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "ns-1", Name: "pvc-3"}, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + itemData, err := runtime.DefaultUnstructuredConverter.ToUnstructured(test.item) + require.NoError(t, err) + + action := &AddPVCFromPodAction{logger: velerotest.NewLogger()} + + input := &velero.RestoreItemActionExecuteInput{ + Item: &unstructured.Unstructured{Object: itemData}, + } + + res, err := action.Execute(input) + require.NoError(t, err) + + assert.Equal(t, test.want, res.AdditionalItems) + }) + } +} diff --git a/pkg/restore/restore.go b/pkg/restore/restore.go index e4433662c..714dc659a 100644 --- a/pkg/restore/restore.go +++ b/pkg/restore/restore.go @@ -1,5 +1,5 @@ /* -Copyright 2017 the Velero contributors. +Copyright 2017, 2019 the Velero contributors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
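For plugin authors, a minimal sketch of a third-party RestoreItemAction using the extended contract introduced above. The action, its target resource, and the ConfigMap dependency are hypothetical (invented for illustration); only the velero types and the UnstructuredContent/UpdatedItem/AdditionalItems usage mirror what this diff defines.

    package example

    import (
    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    	"k8s.io/apimachinery/pkg/runtime/schema"

    	"github.com/heptio/velero/pkg/plugin/velero"
    )

    // RestoreConfigMapAction is a hypothetical action that leaves deployments
    // unmodified but asks Velero to also restore a related ConfigMap
    // ("example-config" is an invented name).
    type RestoreConfigMapAction struct{}

    func (a *RestoreConfigMapAction) AppliesTo() (velero.ResourceSelector, error) {
    	return velero.ResourceSelector{IncludedResources: []string{"deployments"}}, nil
    }

    func (a *RestoreConfigMapAction) Execute(input *velero.RestoreItemActionExecuteInput) (*velero.RestoreItemActionExecuteOutput, error) {
    	obj := &unstructured.Unstructured{Object: input.Item.UnstructuredContent()}

    	return &velero.RestoreItemActionExecuteOutput{
    		// Returning a nil UpdatedItem is also valid now; the gRPC server
    		// falls back to the original item (see restore_item_action_server.go
    		// above).
    		UpdatedItem: input.Item,
    		AdditionalItems: []velero.ResourceIdentifier{{
    			GroupResource: schema.GroupResource{Resource: "configmaps"},
    			Namespace:     obj.GetNamespace(),
    			Name:          "example-config",
    		}},
    	}, nil
    }

Such an action would be registered through the same RegisterRestoreItemAction chain shown in pkg/cmd/server/plugin/plugin.go above.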
@@ -260,6 +260,9 @@ func (kr *kubernetesRestorer) Restore( log: log, fileSystem: kr.fileSystem, }, + applicableActions: make(map[schema.GroupResource][]resolvedAction), + resourceClients: make(map[resourceClientKey]client.Dynamic), + restoredItems: make(map[velero.ResourceIdentifier]struct{}), } return restoreCtx.execute() @@ -330,6 +333,7 @@ type context struct { backup *api.Backup backupReader io.Reader restore *api.Restore + restoreDir string prioritizedResources []schema.GroupResource selector labels.Selector log logrus.FieldLogger @@ -345,6 +349,14 @@ type context struct { volumeSnapshots []*volume.Snapshot resourceTerminatingTimeout time.Duration extractor *backupExtractor + applicableActions map[schema.GroupResource][]resolvedAction + resourceClients map[resourceClientKey]client.Dynamic + restoredItems map[velero.ResourceIdentifier]struct{} +} + +type resourceClientKey struct { + resource schema.GroupResource + namespace string } func (ctx *context) execute() (api.RestoreResult, api.RestoreResult) { @@ -357,12 +369,15 @@ func (ctx *context) execute() (api.RestoreResult, api.RestoreResult) { } defer ctx.fileSystem.RemoveAll(dir) - return ctx.restoreFromDir(dir) + // need to set this for additionalItems to be restored + ctx.restoreDir = dir + + return ctx.restoreFromDir() } // restoreFromDir executes a restore based on backup data contained within a local -// directory. -func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreResult) { +// directory, ctx.restoreDir. +func (ctx *context) restoreFromDir() (api.RestoreResult, api.RestoreResult) { warnings, errs := api.RestoreResult{}, api.RestoreResult{} namespaceFilter := collections.NewIncludesExcludes(). @@ -370,7 +385,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe Excludes(ctx.restore.Spec.ExcludedNamespaces...) // Make sure the top level "resources" dir exists: - resourcesDir := filepath.Join(dir, api.ResourcesDir) + resourcesDir := filepath.Join(ctx.restoreDir, api.ResourcesDir) rde, err := ctx.fileSystem.DirExists(resourcesDir) if err != nil { addVeleroError(&errs, err) @@ -462,7 +477,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe // create a blank one. if !existingNamespaces.Has(mappedNsName) { logger := ctx.log.WithField("namespace", nsName) - ns := getNamespace(logger, filepath.Join(dir, api.ResourcesDir, "namespaces", api.ClusterScopedDir, nsName+".json"), mappedNsName) + ns := getNamespace(logger, getItemFilePath(ctx.restoreDir, "namespaces", "", nsName), mappedNsName) if _, err := kube.EnsureNamespaceExistsAndIsReady(ns, ctx.namespaceClient, ctx.resourceTerminatingTimeout); err != nil { addVeleroError(&errs, err) continue @@ -494,6 +509,15 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe return warnings, errs } +func getItemFilePath(rootDir, groupResource, namespace, name string) string { + switch namespace { + case "": + return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.ClusterScopedDir, name+".json") + default: + return filepath.Join(rootDir, api.ResourcesDir, groupResource, api.NamespaceScopedDir, namespace, name+".json") + } +} + // getNamespace returns a namespace API object that we should attempt to // create before restoring anything into it. It will come from the backup // tarball if it exists, else will be a new one. 
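To make the new getItemFilePath helper concrete, a standalone sketch with the layout constants inlined; the literal values ("resources", "cluster", "namespaces") are assumed from the standard Velero backup tarball layout rather than shown in this diff.

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    // Standalone copy of getItemFilePath with api.ResourcesDir,
    // api.ClusterScopedDir, and api.NamespaceScopedDir inlined (assumed values).
    func getItemFilePath(rootDir, groupResource, namespace, name string) string {
    	if namespace == "" {
    		return filepath.Join(rootDir, "resources", groupResource, "cluster", name+".json")
    	}
    	return filepath.Join(rootDir, "resources", groupResource, "namespaces", namespace, name+".json")
    }

    func main() {
    	// /restore-123/resources/persistentvolumes/cluster/pv-1.json
    	fmt.Println(getItemFilePath("/restore-123", "persistentvolumes", "", "pv-1"))
    	// /restore-123/resources/persistentvolumeclaims/namespaces/ns-1/pvc-1.json
    	fmt.Println(getItemFilePath("/restore-123", "persistentvolumeclaims", "ns-1", "pvc-1"))
    }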
If from the tarball, it @@ -562,6 +586,28 @@ func addToResult(r *api.RestoreResult, ns string, e error) { } } +func (ctx *context) getApplicableActions(groupResource schema.GroupResource, namespace string) []resolvedAction { + if actions, ok := ctx.applicableActions[groupResource]; ok { + return actions + } + + var actions []resolvedAction + for _, action := range ctx.actions { + if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { + continue + } + + if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) { + continue + } + + actions = append(actions, action) + } + + ctx.applicableActions[groupResource] = actions + return actions +} + func (ctx *context) shouldRestore(name string, pvClient client.Dynamic) (bool, error) { pvLogger := ctx.log.WithField("pvName", name) @@ -680,25 +726,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a return warnings, errs } - var ( - resourceClient client.Dynamic - groupResource = schema.ParseGroupResource(resource) - applicableActions []resolvedAction - ) - - // pre-filter the actions based on namespace & resource includes/excludes since - // these will be the same for all items being restored below - for _, action := range ctx.actions { - if !action.resourceIncludesExcludes.ShouldInclude(groupResource.String()) { - continue - } - - if namespace != "" && !action.namespaceIncludesExcludes.ShouldInclude(namespace) { - continue - } - - applicableActions = append(applicableActions, action) - } + groupResource := schema.ParseGroupResource(resource) for _, file := range files { fullPath := filepath.Join(resourcePath, file.Name()) @@ -708,251 +736,337 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a continue } - // make a copy of object retrieved from backup - // to make it available unchanged inside restore actions - itemFromBackup := obj.DeepCopy() - if !ctx.selector.Matches(labels.Set(obj.GetLabels())) { continue } - complete, err := isCompleted(obj, groupResource) - if err != nil { - addToResult(&errs, namespace, fmt.Errorf("error checking completion %q: %v", fullPath, err)) - continue - } - if complete { - ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) - continue - } + w, e := ctx.restoreItem(obj, groupResource, namespace) + merge(&warnings, &w) + merge(&errs, &e) + } - if resourceClient == nil { - // initialize client for this Resource. we need - // metadata from an object to do this. - ctx.log.Infof("Getting client for %v", obj.GroupVersionKind()) + return warnings, errs +} - resource := metav1.APIResource{ - Namespaced: len(namespace) > 0, - Name: groupResource.Resource, +func (ctx *context) getResourceClient(groupResource schema.GroupResource, obj *unstructured.Unstructured, namespace string) (client.Dynamic, error) { + key := resourceClientKey{ + resource: groupResource, + namespace: namespace, + } + + if client, ok := ctx.resourceClients[key]; ok { + return client, nil + } + + // initialize client for this Resource. we need + // metadata from an object to do this. 
+ ctx.log.Infof("Getting client for %v", obj.GroupVersionKind()) + + resource := metav1.APIResource{ + Namespaced: len(namespace) > 0, + Name: groupResource.Resource, + } + + client, err := ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) + if err != nil { + return nil, err + } + + ctx.resourceClients[key] = client + return client, nil +} + +func getResourceID(groupResource schema.GroupResource, namespace, name string) string { + if namespace == "" { + return fmt.Sprintf("%s/%s", groupResource.String(), name) + } + + return fmt.Sprintf("%s/%s/%s", groupResource.String(), namespace, name) +} + +func (ctx *context) restoreItem(obj *unstructured.Unstructured, groupResource schema.GroupResource, namespace string) (api.RestoreResult, api.RestoreResult) { + warnings, errs := api.RestoreResult{}, api.RestoreResult{} + resourceID := getResourceID(groupResource, namespace, obj.GetName()) + + // make a copy of object retrieved from backup + // to make it available unchanged inside restore actions + itemFromBackup := obj.DeepCopy() + + complete, err := isCompleted(obj, groupResource) + if err != nil { + addToResult(&errs, namespace, fmt.Errorf("error checking completion of %q: %v", resourceID, err)) + return warnings, errs + } + if complete { + ctx.log.Infof("%s is complete - skipping", kube.NamespaceAndName(obj)) + return warnings, errs + } + + name := obj.GetName() + + // Check if we've already restored this + itemKey := velero.ResourceIdentifier{ + GroupResource: groupResource, + Namespace: namespace, + Name: name, + } + if _, exists := ctx.restoredItems[itemKey]; exists { + ctx.log.Infof("Skipping %s because it's already been restored.", resourceID) + return warnings, errs + } + ctx.restoredItems[itemKey] = struct{}{} + + // TODO: move to restore item action if/when we add a ShouldRestore() method to the interface + if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" { + ctx.log.Infof("Not restoring pod because it's a mirror pod") + return warnings, errs + } + + resourceClient, err := ctx.getResourceClient(groupResource, obj, namespace) + if err != nil { + addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) + return warnings, errs + } + + if groupResource == kuberesource.PersistentVolumes { + var hasSnapshot bool + + if len(ctx.backup.Status.VolumeBackups) > 0 { + // pre-v0.10 backup + _, hasSnapshot = ctx.backup.Status.VolumeBackups[name] + } else { + // v0.10+ backup + for _, snapshot := range ctx.volumeSnapshots { + if snapshot.Spec.PersistentVolumeName == name { + hasSnapshot = true + break + } } + } - var err error - resourceClient, err = ctx.dynamicFactory.ClientForGroupVersionResource(obj.GroupVersionKind().GroupVersion(), resource, namespace) + if !hasSnapshot && hasDeleteReclaimPolicy(obj.Object) { + ctx.log.Infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.") + ctx.pvsToProvision.Insert(name) + return warnings, errs + } + + // Check if the PV exists in the cluster before attempting to create + // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) + shouldRestoreSnapshot, err := ctx.shouldRestore(name, resourceClient) + if err != nil { + addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) + return warnings, errs + } + + // PV's existence will be recorded later. 
Just skip the volume restore logic. + if shouldRestoreSnapshot { + // restore the PV from snapshot (if applicable) + updatedObj, err := ctx.pvRestorer.executePVAction(obj) if err != nil { - addVeleroError(&errs, fmt.Errorf("error getting resource client for namespace %q, resource %q: %v", namespace, &groupResource, err)) + addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", resourceID, err)) return warnings, errs } + obj = updatedObj + } else if err != nil { + addToResult(&errs, namespace, fmt.Errorf("error checking existence for PV %s: %v", name, err)) + return warnings, errs + } + } + + // clear out non-core metadata fields & status + if obj, err = resetMetadataAndStatus(obj); err != nil { + addToResult(&errs, namespace, err) + return warnings, errs + } + + for _, action := range ctx.getApplicableActions(groupResource, namespace) { + if !action.selector.Matches(labels.Set(obj.GetLabels())) { + return warnings, errs } - name := obj.GetName() + ctx.log.Infof("Executing item action for %v", &groupResource) - // TODO: move to restore item action if/when we add a ShouldRestore() method to the interface - if groupResource == kuberesource.Pods && obj.GetAnnotations()[v1.MirrorPodAnnotationKey] != "" { - ctx.log.Infof("Not restoring pod because it's a mirror pod") - continue + executeOutput, err := action.Execute(&velero.RestoreItemActionExecuteInput{ + Item: obj, + ItemFromBackup: itemFromBackup, + Restore: ctx.restore, + }) + if executeOutput.Warning != nil { + addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", resourceID, executeOutput.Warning)) + } + if err != nil { + addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", resourceID, err)) + return warnings, errs } - if groupResource == kuberesource.PersistentVolumes { - var hasSnapshot bool + unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured) + if !ok { + addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", resourceID, executeOutput.UpdatedItem)) + return warnings, errs + } - if len(ctx.backup.Status.VolumeBackups) > 0 { - // pre-v0.10 backup - _, hasSnapshot = ctx.backup.Status.VolumeBackups[name] - } else { - // v0.10+ backup - for _, snapshot := range ctx.volumeSnapshots { - if snapshot.Spec.PersistentVolumeName == name { - hasSnapshot = true - break - } - } - } + obj = unstructuredObj + + for _, additionalItem := range executeOutput.AdditionalItems { + itemPath := getItemFilePath(ctx.restoreDir, additionalItem.GroupResource.String(), additionalItem.Namespace, additionalItem.Name) + + if _, err := ctx.fileSystem.Stat(itemPath); err != nil { + ctx.log.WithError(err).WithFields(logrus.Fields{ + "additionalResource": additionalItem.GroupResource.String(), + "additionalResourceNamespace": additionalItem.Namespace, + "additionalResourceName": additionalItem.Name, + }).Warn("unable to restore additional item") + addToResult(&warnings, additionalItem.Namespace, err) - if !hasSnapshot && hasDeleteReclaimPolicy(obj.Object) { - ctx.log.Infof("Not restoring PV because it doesn't have a snapshot and its reclaim policy is Delete.") - ctx.pvsToProvision.Insert(name) continue } - // Check if the PV exists in the cluster before attempting to create - // a volume from the snapshot, in order to avoid orphaned volumes (GH #609) - shouldRestoreSnapshot, err := ctx.shouldRestore(name, resourceClient) - + additionalResourceID := getResourceID(additionalItem.GroupResource, additionalItem.Namespace, additionalItem.Name) + additionalObj, err := 
ctx.unmarshal(itemPath) if err != nil { - addToResult(&errs, namespace, errors.Wrapf(err, "error waiting on in-cluster persistentvolume %s", name)) - continue + addToResult(&errs, namespace, errors.Wrapf(err, "error restoring additional item %s", additionalResourceID)) } - // PV's existence will be recorded later. Just skip the volume restore logic. - if shouldRestoreSnapshot { - // restore the PV from snapshot (if applicable) - updatedObj, err := ctx.pvRestorer.executePVAction(obj) - if err != nil { - addToResult(&errs, namespace, fmt.Errorf("error executing PVAction for %s: %v", fullPath, err)) - continue + additionalItemNamespace := additionalItem.Namespace + if additionalItemNamespace != "" { + if remapped, ok := ctx.restore.Spec.NamespaceMapping[additionalItemNamespace]; ok { + additionalItemNamespace = remapped } - obj = updatedObj - } else if err != nil { - addToResult(&errs, namespace, fmt.Errorf("error checking existence for PV %s: %v", name, err)) - continue } + + w, e := ctx.restoreItem(additionalObj, additionalItem.GroupResource, additionalItemNamespace) + merge(&warnings, &w) + merge(&errs, &e) } + } - if groupResource == kuberesource.PersistentVolumeClaims { - pvc := new(v1.PersistentVolumeClaim) - if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil { - addToResult(&errs, namespace, err) - continue - } - - if pvc.Spec.VolumeName != "" && ctx.pvsToProvision.Has(pvc.Spec.VolumeName) { - ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, pvc.Spec.VolumeName) - - pvc.Spec.VolumeName = "" - delete(pvc.Annotations, "pv.kubernetes.io/bind-completed") - delete(pvc.Annotations, "pv.kubernetes.io/bound-by-controller") - - res, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvc) - if err != nil { - addToResult(&errs, namespace, err) - continue - } - obj.Object = res - } - } - - // clear out non-core metadata fields & status - if obj, err = resetMetadataAndStatus(obj); err != nil { + // This comes after running item actions because we have built-in actions that restore + // a PVC's associated PV (if applicable). As part of the PV being restored, the 'pvsToProvision' + // set may be inserted into, and this needs to happen *before* running the following block of logic. + // + // The side effect of this is that it's impossible for a user to write a restore item action that + // adjusts this behavior (i.e. of resetting the PVC for dynamic provisioning if it claims a PV with + // a reclaim policy of Delete and no snapshot). If/when that becomes an issue for users, we can + // revisit. This would be easier with a multi-pass restore process. 
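The block below resets a PVC for dynamic provisioning; as a self-contained illustration of exactly what gets cleared, here is the same operation on a hand-built unstructured PVC (all names invented).

    package main

    import (
    	"fmt"

    	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
    	pvc := &unstructured.Unstructured{Object: map[string]interface{}{
    		"apiVersion": "v1",
    		"kind":       "PersistentVolumeClaim",
    		"metadata": map[string]interface{}{
    			"name":      "pvc-1",
    			"namespace": "ns-1",
    			"annotations": map[string]interface{}{
    				"pv.kubernetes.io/bind-completed":      "yes",
    				"pv.kubernetes.io/bound-by-controller": "yes",
    			},
    		},
    		"spec": map[string]interface{}{"volumeName": "pv-1"},
    	}}

    	// Drop the binding so the cluster provisions a fresh volume on restore.
    	unstructured.RemoveNestedField(pvc.Object, "spec", "volumeName")
    	annotations := pvc.GetAnnotations()
    	delete(annotations, "pv.kubernetes.io/bind-completed")
    	delete(annotations, "pv.kubernetes.io/bound-by-controller")
    	pvc.SetAnnotations(annotations)

    	fmt.Println(pvc.Object) // no spec.volumeName, no bind annotations
    }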
+	if groupResource == kuberesource.PersistentVolumeClaims {
+		pvc := new(v1.PersistentVolumeClaim)
+		if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pvc); err != nil {
 			addToResult(&errs, namespace, err)
-			continue
+			return warnings, errs
 		}
 
-		for _, action := range applicableActions {
-			if !action.selector.Matches(labels.Set(obj.GetLabels())) {
-				continue
-			}
+		if pvc.Spec.VolumeName != "" && ctx.pvsToProvision.Has(pvc.Spec.VolumeName) {
+			ctx.log.Infof("Resetting PersistentVolumeClaim %s/%s for dynamic provisioning because its PV %v has a reclaim policy of Delete", namespace, name, pvc.Spec.VolumeName)
 
-			ctx.log.Infof("Executing item action for %v", &groupResource)
+			// use the unstructured helpers here since we're only deleting and
+			// the unstructured converter will add back (empty) fields for metadata
+			// and status that we removed earlier.
+			unstructured.RemoveNestedField(obj.Object, "spec", "volumeName")
+			annotations := obj.GetAnnotations()
+			delete(annotations, "pv.kubernetes.io/bind-completed")
+			delete(annotations, "pv.kubernetes.io/bound-by-controller")
+			obj.SetAnnotations(annotations)
+		}
+	}
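The comment above is the reason for mutating obj.Object in place rather than round-tripping through the typed API. A small self-contained demonstration of the converter behavior it refers to (illustration only, not code from this diff; assumes corev1api "k8s.io/api/core/v1" and "k8s.io/apimachinery/pkg/runtime" are imported):

// Demo: converting a typed PVC back to unstructured re-adds an empty
// "status" map and a nil "metadata.creationTimestamp", exactly the fields
// resetMetadataAndStatus had just stripped.
func demoRoundTripArtifact() (map[string]interface{}, error) {
	pvc := new(corev1api.PersistentVolumeClaim)
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(pvc)
	if err != nil {
		return nil, err
	}
	// content["status"] == map[string]interface{}{}
	// content["metadata"].(map[string]interface{})["creationTimestamp"] == nil
	return content, nil
}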
-			executeOutput, err := action.Execute(&velero.RestoreItemActionExecuteInput{
-				Item:           obj,
-				ItemFromBackup: itemFromBackup,
-				Restore:        ctx.restore,
-			})
-			if executeOutput.Warning != nil {
-				addToResult(&warnings, namespace, fmt.Errorf("warning preparing %s: %v", fullPath, executeOutput.Warning))
-			}
-			if err != nil {
-				addToResult(&errs, namespace, fmt.Errorf("error preparing %s: %v", fullPath, err))
-				continue
-			}
+	// necessary because we may have remapped the namespace
+	// if the namespace is blank, don't create the key
+	originalNamespace := obj.GetNamespace()
+	if namespace != "" {
+		obj.SetNamespace(namespace)
+	}
 
-			unstructuredObj, ok := executeOutput.UpdatedItem.(*unstructured.Unstructured)
-			if !ok {
-				addToResult(&errs, namespace, fmt.Errorf("%s: unexpected type %T", fullPath, executeOutput.UpdatedItem))
-				continue
-			}
+	// label the resource with the restore's name and the restored backup's name
+	// for easy identification of all cluster resources created by this restore
+	// and which backup they came from
+	addRestoreLabels(obj, ctx.restore.Name, ctx.restore.Spec.BackupName)
 
-			obj = unstructuredObj
+	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
+	createdObj, restoreErr := resourceClient.Create(obj)
+	if apierrors.IsAlreadyExists(restoreErr) {
+		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
+		if err != nil {
+			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
+			addToResult(&warnings, namespace, err)
+			return warnings, errs
+		}
+		// Remove insubstantial metadata
+		fromCluster, err = resetMetadataAndStatus(fromCluster)
+		if err != nil {
+			ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
+			addToResult(&warnings, namespace, err)
+			return warnings, errs
 		}
 
-		// necessary because we may have remapped the namespace
-		// if the namespace is blank, don't create the key
-		originalNamespace := obj.GetNamespace()
-		if namespace != "" {
-			obj.SetNamespace(namespace)
-		}
+		// We know the object from the cluster won't have the backup/restore name labels, so
+		// copy them from the object we attempted to restore.
+		labels := obj.GetLabels()
+		addRestoreLabels(fromCluster, labels[api.RestoreNameLabel], labels[api.BackupNameLabel])
 
-		// label the resource with the restore's name and the restored backup's name
-		// for easy identification of all cluster resources created by this restore
-		// and which backup they came from
-		addRestoreLabels(obj, ctx.restore.Name, ctx.restore.Spec.BackupName)
-
-		ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
-		createdObj, restoreErr := resourceClient.Create(obj)
-		if apierrors.IsAlreadyExists(restoreErr) {
-			fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
-			if err != nil {
-				ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
-				addToResult(&warnings, namespace, err)
-				continue
-			}
-			// Remove insubstantial metadata
-			fromCluster, err = resetMetadataAndStatus(fromCluster)
-			if err != nil {
-				ctx.log.Infof("Error trying to reset metadata for %s: %v", kube.NamespaceAndName(obj), err)
-				addToResult(&warnings, namespace, err)
-				continue
-			}
-
-			// We know the object from the cluster won't have the backup/restore name labels, so
-			// copy them from the object we attempted to restore.
-			labels := obj.GetLabels()
-			addRestoreLabels(fromCluster, labels[api.RestoreNameLabel], labels[api.BackupNameLabel])
-
-			if equality.Semantic.DeepEqual(fromCluster, obj) {
-				ctx.log.Infof("Skipping restore of %s: %v because it already exists in the cluster and is unchanged from the backed up version", obj.GroupVersionKind().Kind, name)
-			} else {
-				switch groupResource {
-				case kuberesource.ServiceAccounts:
-					desired, err := mergeServiceAccounts(fromCluster, obj)
-					if err != nil {
-						ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
-						addToResult(&warnings, namespace, err)
-						break
-					}
-
-					patchBytes, err := generatePatch(fromCluster, desired)
-					if err != nil {
-						ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
-						addToResult(&warnings, namespace, err)
-						break
-					}
-
-					if patchBytes == nil {
-						// In-cluster and desired state are the same, so move on to the next item
-						break
-					}
-
-					_, err = resourceClient.Patch(name, patchBytes)
-					if err != nil {
-						addToResult(&warnings, namespace, err)
-						break
-					}
-					ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
-				default:
-					e := errors.Errorf("not restored: %s and is different from backed up version.", restoreErr)
-					addToResult(&warnings, namespace, e)
+		if !equality.Semantic.DeepEqual(fromCluster, obj) {
+			switch groupResource {
+			case kuberesource.ServiceAccounts:
+				desired, err := mergeServiceAccounts(fromCluster, obj)
+				if err != nil {
+					ctx.log.Infof("error merging secrets for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+					addToResult(&warnings, namespace, err)
+					return warnings, errs
 				}
+
+				patchBytes, err := generatePatch(fromCluster, desired)
+				if err != nil {
+					ctx.log.Infof("error generating patch for ServiceAccount %s: %v", kube.NamespaceAndName(obj), err)
+					addToResult(&warnings, namespace, err)
+					return warnings, errs
+				}
+
+				if patchBytes == nil {
+					// In-cluster and desired state are the same, so move on to the next item
+					return warnings, errs
+				}
+
+				_, err = resourceClient.Patch(name, patchBytes)
+				if err != nil {
+					addToResult(&warnings, namespace, err)
+				} else {
+					ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
+				}
+			default:
+				e := errors.Errorf("not restored: %s and is different from backed up version.", restoreErr)
+				addToResult(&warnings, namespace, e)
 			}
-			continue
-		}
 
-		// Error was something other than an AlreadyExists
-		if restoreErr != nil {
-			ctx.log.Infof("error restoring %s: %v", name, err)
-			addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", fullPath, restoreErr))
-			continue
+			return warnings, errs
 		}
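generatePatch also isn't shown in this diff. Given how it's used above, where a nil result means "nothing to change", a plausible shape (an assumption, using jsonpatch.CreateMergePatch from github.com/evanphx/json-patch; also assumes encoding/json and github.com/pkg/errors are imported) is a two-way JSON merge patch:

// Hypothetical sketch of generatePatch; the real helper may differ.
func generatePatch(original, desired runtime.Unstructured) ([]byte, error) {
	origBytes, err := json.Marshal(original.UnstructuredContent())
	if err != nil {
		return nil, errors.Wrap(err, "error marshaling original resource")
	}

	desiredBytes, err := json.Marshal(desired.UnstructuredContent())
	if err != nil {
		return nil, errors.Wrap(err, "error marshaling desired resource")
	}

	patchBytes, err := jsonpatch.CreateMergePatch(origBytes, desiredBytes)
	if err != nil {
		return nil, errors.Wrap(err, "error creating merge patch")
	}

	// An empty merge patch means the in-cluster and desired states already match.
	if string(patchBytes) == "{}" {
		return nil, nil
	}

	return patchBytes, nil
}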
 
-		if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 {
-			if ctx.resticRestorer == nil {
-				ctx.log.Warn("No restic restorer, not restoring pod's volumes")
-			} else {
-				ctx.globalWaitGroup.GoErrorSlice(func() []error {
-					pod := new(v1.Pod)
-					if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
-						ctx.log.WithError(err).Error("error converting unstructured pod")
-						return []error{err}
-					}
+		ctx.log.Infof("Skipping restore of %s: %v because it already exists in the cluster and is unchanged from the backed up version", obj.GroupVersionKind().Kind, name)
+		return warnings, errs
+	}
 
-					if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.backup.Spec.StorageLocation, ctx.log); errs != nil {
-						ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
-						return errs
-					}
+	// Error was something other than an AlreadyExists
+	if restoreErr != nil {
+		ctx.log.Infof("error restoring %s: %v", name, restoreErr)
+		addToResult(&errs, namespace, fmt.Errorf("error restoring %s: %v", resourceID, restoreErr))
+		return warnings, errs
+	}
 
-					return nil
-				})
-			}
+	if groupResource == kuberesource.Pods && len(restic.GetPodSnapshotAnnotations(obj)) > 0 {
+		if ctx.resticRestorer == nil {
+			ctx.log.Warn("No restic restorer, not restoring pod's volumes")
+		} else {
+			ctx.globalWaitGroup.GoErrorSlice(func() []error {
+				pod := new(v1.Pod)
+				if err := runtime.DefaultUnstructuredConverter.FromUnstructured(createdObj.UnstructuredContent(), &pod); err != nil {
+					ctx.log.WithError(err).Error("error converting unstructured pod")
+					return []error{err}
+				}
+
+				if errs := ctx.resticRestorer.RestorePodVolumes(ctx.restore, pod, originalNamespace, ctx.backup.Spec.StorageLocation, ctx.log); errs != nil {
+					ctx.log.WithError(kubeerrs.NewAggregate(errs)).Error("unable to successfully complete restic restores of pod's volumes")
+					return errs
+				}
+
+				return nil
+			})
 		}
 	}
diff --git a/pkg/restore/restore_test.go b/pkg/restore/restore_test.go
index e85d714af..d82a66915 100644
--- a/pkg/restore/restore_test.go
+++ b/pkg/restore/restore_test.go
@@ -1,5 +1,5 @@
 /*
-Copyright 2017 the Velero contributors.
+Copyright 2017, 2019 the Velero contributors.
 
 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
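The restoredItems map that the test changes below begin initializing suggests restoreItem records every item it handles by its velero.ResourceIdentifier, so a pod → PVC → PV chain can't restore the same object twice. The guard presumably reduces to something like this (hypothetical shape; the check itself isn't visible in this diff):

// Hypothetical record-and-skip guard; assumes restoredItems is the
// map[velero.ResourceIdentifier]struct{} initialized in the tests below.
func markRestored(restoredItems map[velero.ResourceIdentifier]struct{}, id velero.ResourceIdentifier) bool {
	if _, alreadyRestored := restoredItems[id]; alreadyRestored {
		return true
	}
	restoredItems[id] = struct{}{}
	return false
}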
@@ -39,6 +39,7 @@ import (
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 
 	api "github.com/heptio/velero/pkg/apis/velero/v1"
+	pkgclient "github.com/heptio/velero/pkg/client"
 	"github.com/heptio/velero/pkg/generated/clientset/versioned/fake"
 	informers "github.com/heptio/velero/pkg/generated/informers/externalversions"
 	"github.com/heptio/velero/pkg/kuberesource"
@@ -195,11 +196,12 @@ func TestRestoreNamespaceFiltering(t *testing.T) {
 				fileSystem:           test.fileSystem,
 				log:                  log,
 				prioritizedResources: test.prioritizedResources,
+				restoreDir:           test.baseDir,
 			}
 
 			nsClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(&v1.Namespace{}, nil)
 
-			warnings, errors := ctx.restoreFromDir(test.baseDir)
+			warnings, errors := ctx.restoreFromDir()
 
 			assert.Empty(t, warnings.Velero)
 			assert.Empty(t, warnings.Cluster)
@@ -291,11 +293,12 @@ func TestRestorePriority(t *testing.T) {
 				fileSystem:           test.fileSystem,
 				prioritizedResources: test.prioritizedResources,
 				log:                  log,
+				restoreDir:           test.baseDir,
 			}
 
 			nsClient.On("Get", mock.Anything, metav1.GetOptions{}).Return(&v1.Namespace{}, nil)
 
-			warnings, errors := ctx.restoreFromDir(test.baseDir)
+			warnings, errors := ctx.restoreFromDir()
 
 			assert.Empty(t, warnings.Velero)
 			assert.Empty(t, warnings.Cluster)
@@ -342,13 +345,17 @@ func TestNamespaceRemapping(t *testing.T) {
 		restore:              restore,
 		backup:               &api.Backup{},
 		log:                  velerotest.NewLogger(),
+		applicableActions:    make(map[schema.GroupResource][]resolvedAction),
+		resourceClients:      make(map[resourceClientKey]pkgclient.Dynamic),
+		restoredItems:        make(map[velero.ResourceIdentifier]struct{}),
+		restoreDir:           baseDir,
 	}
 
 	nsClient.On("Get", "ns-2", metav1.GetOptions{}).Return(&v1.Namespace{}, k8serrors.NewNotFound(schema.GroupResource{Resource: "namespaces"}, "ns-2"))
 	ns := newTestNamespace("ns-2").Namespace
 	nsClient.On("Create", ns).Return(ns, nil)
 
-	warnings, errors := ctx.restoreFromDir(baseDir)
+	warnings, errors := ctx.restoreFromDir()
 
 	assert.Empty(t, warnings.Velero)
 	assert.Empty(t, warnings.Cluster)
@@ -653,6 +660,9 @@ func TestRestoreResourceForNamespace(t *testing.T) {
 					snapshotLocationLister: snapshotLocationLister,
 					backup:                 &api.Backup{},
 				},
+				applicableActions: make(map[schema.GroupResource][]resolvedAction),
+				resourceClients:   make(map[resourceClientKey]pkgclient.Dynamic),
+				restoredItems:     make(map[velero.ResourceIdentifier]struct{}),
 			}
 
 			warnings, errors := ctx.restoreResource(test.resourcePath, test.namespace, test.resourcePath)
@@ -736,8 +746,11 @@ func TestRestoringExistingServiceAccount(t *testing.T) {
 				BackupName: "my-backup",
 			},
 		},
-		backup: &api.Backup{},
-		log:    velerotest.NewLogger(),
+		backup:            &api.Backup{},
+		log:               velerotest.NewLogger(),
+		applicableActions: make(map[schema.GroupResource][]resolvedAction),
+		resourceClients:   make(map[resourceClientKey]pkgclient.Dynamic),
+		restoredItems:     make(map[velero.ResourceIdentifier]struct{}),
 	}
 
 	warnings, errors := ctx.restoreResource("serviceaccounts", "ns-1", "foo/resources/serviceaccounts/namespaces/ns-1/")
@@ -994,11 +1007,14 @@ status:
 			Name: "my-restore",
 		},
 	},
-		backup:          backup,
-		log:             velerotest.NewLogger(),
-		pvsToProvision:  sets.NewString(),
-		pvRestorer:      pvRestorer,
-		namespaceClient: nsClient,
+		backup:            backup,
+		log:               velerotest.NewLogger(),
+		pvsToProvision:    sets.NewString(),
+		pvRestorer:        pvRestorer,
+		namespaceClient:   nsClient,
+		applicableActions: make(map[schema.GroupResource][]resolvedAction),
+		resourceClients:   make(map[resourceClientKey]pkgclient.Dynamic),
+		restoredItems:     make(map[velero.ResourceIdentifier]struct{}),
 	}
 
 	if test.haveSnapshot && !test.legacyBackup {
@@ -1521,10 +1537,17 @@ status:
 			assert.Equal(t, test.expectedResult, result)
 		})
-
 	}
 }
 
+func TestGetItemFilePath(t *testing.T) {
+	res := getItemFilePath("root", "resource", "", "item")
+	assert.Equal(t, "root/resources/resource/cluster/item.json", res)
+
+	res = getItemFilePath("root", "resource", "namespace", "item")
+	assert.Equal(t, "root/resources/resource/namespaces/namespace/item.json", res)
+}
+
 type testUnstructured struct {
 	*unstructured.Unstructured
 }
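One last detail from restore.go worth restating: an additional item's namespace is passed through the restore spec's NamespaceMapping before the recursive restoreItem call, and cluster-scoped items (empty namespace) are left alone. Expressed as a standalone function (a restatement for clarity, with an illustrative name only), the rule is:

// Restatement of the additional-item namespace rule from restore.go above.
func remapNamespace(mapping map[string]string, namespace string) string {
	if namespace == "" {
		// cluster-scoped: nothing to remap
		return namespace
	}
	if remapped, ok := mapping[namespace]; ok {
		return remapped
	}
	return namespace
}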