Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-05 13:05:17 +00:00)
simplify group/resource/item backupper structure
Signed-off-by: Steve Kriss <krisss@vmware.com>
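Roughly, the refactor drops the groupBackupper and resourceBackupper factory layers and leaves a single resourceBackupper that walks every discovered API group and hands items to backuppers built through a newItemBackupper closure. The following stand-alone Go sketch is illustrative only and is not code from this commit: stub types stand in for Velero's real discovery.Helper, client.DynamicFactory, and logging dependencies, and the names here exist only for the example.

// Illustrative sketch of the simplified structure (assumptions noted above).
package main

import "fmt"

// ItemBackupper backs up a single item (stubbed for the sketch).
type ItemBackupper interface {
	backupItem(name string) error
}

type stubItemBackupper struct{}

func (s *stubItemBackupper) backupItem(name string) error {
	fmt.Println("backing up item:", name)
	return nil
}

// resourceBackupper iterates API groups and resources and hands each item to
// an ItemBackupper produced by the newItemBackupper closure.
type resourceBackupper struct {
	groups           map[string][]string // group/version -> resource names (stand-in for discovery)
	newItemBackupper func() ItemBackupper
}

func (r *resourceBackupper) backupAllGroups() {
	for group := range r.groups {
		if err := r.backupGroup(group); err != nil {
			fmt.Println("error backing up API group", group, ":", err)
		}
	}
}

func (r *resourceBackupper) backupGroup(group string) error {
	for _, resource := range r.groups[group] {
		// one item backupper per resource, as in the refactored backupResource
		ib := r.newItemBackupper()
		if err := ib.backupItem(group + "/" + resource); err != nil {
			fmt.Println("error backing up resource", resource, ":", err)
		}
	}
	return nil
}

func main() {
	rb := &resourceBackupper{
		groups: map[string][]string{
			"v1":      {"pods", "persistentvolumeclaims", "persistentvolumes"},
			"apps/v1": {"deployments"},
		},
		newItemBackupper: func() ItemBackupper { return &stubItemBackupper{} },
	}
	rb.backupAllGroups()
}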
@@ -59,7 +59,6 @@ type kubernetesBackupper struct {
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
podCommandExecutor podexec.PodCommandExecutor
groupBackupperFactory groupBackupperFactory
resticBackupperFactory restic.BackupperFactory
resticTimeout time.Duration
}
@@ -98,7 +97,6 @@ func NewKubernetesBackupper(
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
podCommandExecutor: podCommandExecutor,
groupBackupperFactory: &defaultGroupBackupperFactory{},
resticBackupperFactory: resticBackupperFactory,
resticTimeout: resticTimeout,
}, nil
@@ -264,25 +262,36 @@ func (kb *kubernetesBackupper) Backup(log logrus.FieldLogger, backupRequest *Req
}
}

gb := kb.groupBackupperFactory.newGroupBackupper(
log,
backupRequest,
kb.dynamicFactory,
kb.discoveryHelper,
cohabitatingResources(),
kb.podCommandExecutor,
tw,
resticBackupper,
newPVCSnapshotTracker(),
volumeSnapshotterGetter,
)

for _, group := range kb.discoveryHelper.Resources() {
if err := gb.backupGroup(group); err != nil {
log.WithError(err).WithField("apiGroup", group.String()).Error("Error backing up API group")
pvcSnapshotTracker := newPVCSnapshotTracker()
newItemBackupper := func() ItemBackupper {
itemBackupper := &defaultItemBackupper{
backupRequest: backupRequest,
tarWriter: tw,
dynamicFactory: kb.dynamicFactory,
discoveryHelper: kb.discoveryHelper,
resticBackupper: resticBackupper,
resticSnapshotTracker: pvcSnapshotTracker,
volumeSnapshotterGetter: volumeSnapshotterGetter,
itemHookHandler: &defaultItemHookHandler{
podCommandExecutor: kb.podCommandExecutor,
},
}
itemBackupper.additionalItemBackupper = itemBackupper

return itemBackupper
}

resourceBackupper := &resourceBackupper{
log: log,
backupRequest: backupRequest,
discoveryHelper: kb.discoveryHelper,
dynamicFactory: kb.dynamicFactory,
cohabitatingResources: cohabitatingResources(),
newItemBackupper: newItemBackupper,
}

resourceBackupper.backupAllGroups()

return nil
}


@@ -2690,9 +2690,8 @@ func newHarness(t *testing.T) *harness {
return &harness{
APIServer: apiServer,
backupper: &kubernetesBackupper{
dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient),
discoveryHelper: discoveryHelper,
groupBackupperFactory: new(defaultGroupBackupperFactory),
dynamicFactory: client.NewDynamicFactory(apiServer.DynamicClient),
discoveryHelper: discoveryHelper,

// unsupported
podCommandExecutor: nil,
@@ -1,181 +0,0 @@
/*
Copyright 2017 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package backup

import (
"sort"
"strings"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/discovery"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
)

type groupBackupperFactory interface {
newGroupBackupper(
log logrus.FieldLogger,
backupRequest *Request,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
cohabitatingResources map[string]*cohabitatingResource,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) groupBackupper
}

type defaultGroupBackupperFactory struct{}

func (f *defaultGroupBackupperFactory) newGroupBackupper(
log logrus.FieldLogger,
backupRequest *Request,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
cohabitatingResources map[string]*cohabitatingResource,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) groupBackupper {
return &defaultGroupBackupper{
log: log,
backupRequest: backupRequest,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
cohabitatingResources: cohabitatingResources,
podCommandExecutor: podCommandExecutor,
tarWriter: tarWriter,
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
volumeSnapshotterGetter: volumeSnapshotterGetter,

resourceBackupperFactory: &defaultResourceBackupperFactory{},
}
}

type groupBackupper interface {
backupGroup(group *metav1.APIResourceList) error
}

type defaultGroupBackupper struct {
log logrus.FieldLogger
backupRequest *Request
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
cohabitatingResources map[string]*cohabitatingResource
podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resticBackupper restic.Backupper
resticSnapshotTracker *pvcSnapshotTracker
resourceBackupperFactory resourceBackupperFactory
volumeSnapshotterGetter VolumeSnapshotterGetter
}

// backupGroup backs up a single API group.
func (gb *defaultGroupBackupper) backupGroup(group *metav1.APIResourceList) error {
log := gb.log.WithField("group", group.GroupVersion)

log.Infof("Backing up group")

// Parse so we can check if this is the core group
gv, err := schema.ParseGroupVersion(group.GroupVersion)
if err != nil {
return errors.Wrapf(err, "error parsing GroupVersion %q", group.GroupVersion)
}
if gv.Group == "" {
// This is the core group, so make sure we process in the following order: pods, pvcs, pvs,
// everything else.
sortCoreGroup(group)
}

rb := gb.resourceBackupperFactory.newResourceBackupper(
log,
gb.backupRequest,
gb.dynamicFactory,
gb.discoveryHelper,
gb.cohabitatingResources,
gb.podCommandExecutor,
gb.tarWriter,
gb.resticBackupper,
gb.resticSnapshotTracker,
gb.volumeSnapshotterGetter,
)

for _, resource := range group.APIResources {
if err := rb.backupResource(group, resource); err != nil {
log.WithError(err).WithField("resource", resource.String()).Error("Error backing up API resource")
}
}

return nil
}

// sortCoreGroup sorts group as a coreGroup.
func sortCoreGroup(group *metav1.APIResourceList) {
sort.Stable(coreGroup(group.APIResources))
}

// coreGroup is used to sort APIResources in the core API group. The sort order is pods, pvcs, pvs,
// then everything else.
type coreGroup []metav1.APIResource

func (c coreGroup) Len() int {
return len(c)
}

func (c coreGroup) Less(i, j int) bool {
return coreGroupResourcePriority(c[i].Name) < coreGroupResourcePriority(c[j].Name)
}

func (c coreGroup) Swap(i, j int) {
c[j], c[i] = c[i], c[j]
}

// These constants represent the relative priorities for resources in the core API group. We want to
// ensure that we process pods, then pvcs, then pvs, then anything else. This ensures that when a
// pod is backed up, we can perform a pre hook, then process pvcs and pvs (including taking a
// snapshot), then perform a post hook on the pod.
const (
pod = iota
pvc
pv
other
)

// coreGroupResourcePriority returns the relative priority of the resource, in the following order:
// pods, pvcs, pvs, everything else.
func coreGroupResourcePriority(resource string) int {
switch strings.ToLower(resource) {
case "pods":
return pod
case "persistentvolumeclaims":
return pvc
case "persistentvolumes":
return pv
}

return other
}
@@ -33,63 +33,17 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"

api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/discovery"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/plugin/velero"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/boolptr"
"github.com/vmware-tanzu/velero/pkg/volume"
)

type itemBackupperFactory interface {
newItemBackupper(
backup *Request,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) ItemBackupper
}

type defaultItemBackupperFactory struct{}

func (f *defaultItemBackupperFactory) newItemBackupper(
backupRequest *Request,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) ItemBackupper {
ib := &defaultItemBackupper{
backupRequest: backupRequest,
tarWriter: tarWriter,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
volumeSnapshotterGetter: volumeSnapshotterGetter,

itemHookHandler: &defaultItemHookHandler{
podCommandExecutor: podCommandExecutor,
},
}

// this is for testing purposes
ib.additionalItemBackupper = ib

return ib
}

// ItemBackupper can back up individual items to a tar writer.
type ItemBackupper interface {
backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource, preferredGVR schema.GroupVersionResource) (bool, error)
}
@@ -268,13 +222,13 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
versionPath := version

if version == preferredVersion {
versionPath = version + api.PreferredVersionDir
versionPath = version + velerov1api.PreferredVersionDir
}

if namespace != "" {
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), versionPath, api.NamespaceScopedDir, namespace, name+".json")
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), versionPath, velerov1api.NamespaceScopedDir, namespace, name+".json")
} else {
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), versionPath, api.ClusterScopedDir, name+".json")
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), versionPath, velerov1api.ClusterScopedDir, name+".json")
}

itemBytes, err := json.Marshal(obj.UnstructuredContent())
@@ -302,9 +256,9 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim

if version == preferredVersion {
if namespace != "" {
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.NamespaceScopedDir, namespace, name+".json")
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), velerov1api.NamespaceScopedDir, namespace, name+".json")
} else {
filePath = filepath.Join(api.ResourcesDir, groupResource.String(), api.ClusterScopedDir, name+".json")
filePath = filepath.Join(velerov1api.ResourcesDir, groupResource.String(), velerov1api.ClusterScopedDir, name+".json")
}

hdr = &tar.Header{
@@ -406,7 +360,7 @@ func (ib *defaultItemBackupper) executeActions(

// volumeSnapshotter instantiates and initializes a VolumeSnapshotter given a VolumeSnapshotLocation,
// or returns an existing one if one's already been initialized for the location.
func (ib *defaultItemBackupper) volumeSnapshotter(snapshotLocation *api.VolumeSnapshotLocation) (velero.VolumeSnapshotter, error) {
func (ib *defaultItemBackupper) volumeSnapshotter(snapshotLocation *velerov1api.VolumeSnapshotLocation) (velero.VolumeSnapshotter, error) {
if bs, ok := ib.snapshotLocationVolumeSnapshotters[snapshotLocation.Name]; ok {
return bs, nil
}
@@ -543,7 +497,7 @@ func (ib *defaultItemBackupper) takePVSnapshot(obj runtime.Unstructured, log log
return kubeerrs.NewAggregate(errs)
}

func volumeSnapshot(backup *api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
func volumeSnapshot(backup *velerov1api.Backup, volumeName, volumeID, volumeType, az, location string, iops *int64) *volume.Snapshot {
return &volume.Snapshot{
Spec: volume.SnapshotSpec{
BackupName: backup.Name,
@@ -1,5 +1,5 @@
/*
Copyright 2017 the Velero contributors.
Copyright 2017, 2020 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -17,6 +17,9 @@ limitations under the License.
package backup

import (
"sort"
"strings"

"github.com/pkg/errors"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -30,77 +33,58 @@ import (
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/discovery"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/podexec"
"github.com/vmware-tanzu/velero/pkg/restic"
"github.com/vmware-tanzu/velero/pkg/util/collections"
)

type resourceBackupperFactory interface {
newResourceBackupper(
log logrus.FieldLogger,
backupRequest *Request,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
cohabitatingResources map[string]*cohabitatingResource,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) resourceBackupper
// resourceBackupper collects resources from the Kubernetes API according to
// the backup spec and passes them to an itemBackupper to be backed up.
type resourceBackupper struct {
log logrus.FieldLogger
backupRequest *Request
discoveryHelper discovery.Helper
dynamicFactory client.DynamicFactory
cohabitatingResources map[string]*cohabitatingResource
newItemBackupper func() ItemBackupper
}

type defaultResourceBackupperFactory struct{}

func (f *defaultResourceBackupperFactory) newResourceBackupper(
log logrus.FieldLogger,
backupRequest *Request,
dynamicFactory client.DynamicFactory,
discoveryHelper discovery.Helper,
cohabitatingResources map[string]*cohabitatingResource,
podCommandExecutor podexec.PodCommandExecutor,
tarWriter tarWriter,
resticBackupper restic.Backupper,
resticSnapshotTracker *pvcSnapshotTracker,
volumeSnapshotterGetter VolumeSnapshotterGetter,
) resourceBackupper {
return &defaultResourceBackupper{
log: log,
backupRequest: backupRequest,
dynamicFactory: dynamicFactory,
discoveryHelper: discoveryHelper,
cohabitatingResources: cohabitatingResources,
podCommandExecutor: podCommandExecutor,
tarWriter: tarWriter,
resticBackupper: resticBackupper,
resticSnapshotTracker: resticSnapshotTracker,
volumeSnapshotterGetter: volumeSnapshotterGetter,

itemBackupperFactory: &defaultItemBackupperFactory{},
// collect backs up all API groups.
func (r *resourceBackupper) backupAllGroups() {
for _, group := range r.discoveryHelper.Resources() {
if err := r.backupGroup(r.log, group); err != nil {
r.log.WithError(err).WithField("apiGroup", group.String()).Error("Error backing up API group")
}
}
}

type resourceBackupper interface {
backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error
}
// backupGroup backs up a single API group.
func (r *resourceBackupper) backupGroup(log logrus.FieldLogger, group *metav1.APIResourceList) error {
log = log.WithField("group", group.GroupVersion)

type defaultResourceBackupper struct {
log logrus.FieldLogger
backupRequest *Request
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
cohabitatingResources map[string]*cohabitatingResource
podCommandExecutor podexec.PodCommandExecutor
tarWriter tarWriter
resticBackupper restic.Backupper
resticSnapshotTracker *pvcSnapshotTracker
itemBackupperFactory itemBackupperFactory
volumeSnapshotterGetter VolumeSnapshotterGetter
log.Infof("Backing up group")

// Parse so we can check if this is the core group
gv, err := schema.ParseGroupVersion(group.GroupVersion)
if err != nil {
return errors.Wrapf(err, "error parsing GroupVersion %q", group.GroupVersion)
}
if gv.Group == "" {
// This is the core group, so make sure we process in the following order: pods, pvcs, pvs,
// everything else.
sortCoreGroup(group)
}

for _, resource := range group.APIResources {
if err := r.backupResource(log, group, resource); err != nil {
log.WithError(err).WithField("resource", resource.String()).Error("Error backing up API resource")
}
}

return nil
}
// backupResource backs up all the objects for a given group-version-resource.
func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList, resource metav1.APIResource) error {
log := rb.log.WithField("resource", resource.Name)
func (r *resourceBackupper) backupResource(log logrus.FieldLogger, group *metav1.APIResourceList, resource metav1.APIResource) error {
log = log.WithField("resource", resource.Name)

log.Info("Backing up resource")

@@ -111,7 +95,7 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name}

// Getting the preferred group version of this resource
preferredGVR, _, err := rb.discoveryHelper.ResourceFor(gr.WithVersion(""))
preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion(""))
if err != nil {
return errors.WithStack(err)
}
@@ -121,8 +105,8 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
// If the resource we are backing up is NOT namespaces, and it is cluster-scoped, check to see if
// we should include it based on the IncludeClusterResources setting.
if gr != kuberesource.Namespaces && clusterScoped {
if rb.backupRequest.Spec.IncludeClusterResources == nil {
if !rb.backupRequest.NamespaceIncludesExcludes.IncludeEverything() {
if r.backupRequest.Spec.IncludeClusterResources == nil {
if !r.backupRequest.NamespaceIncludesExcludes.IncludeEverything() {
// when IncludeClusterResources == nil (auto), only directly
// back up cluster-scoped resources if we're doing a full-cluster
// (all namespaces) backup. Note that in the case of a subset of
@@ -133,18 +117,18 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
log.Info("Skipping resource because it's cluster-scoped and only specific namespaces are included in the backup")
return nil
}
} else if !*rb.backupRequest.Spec.IncludeClusterResources {
} else if !*r.backupRequest.Spec.IncludeClusterResources {
log.Info("Skipping resource because it's cluster-scoped")
return nil
}
}

if !rb.backupRequest.ResourceIncludesExcludes.ShouldInclude(gr.String()) {
if !r.backupRequest.ResourceIncludesExcludes.ShouldInclude(gr.String()) {
log.Infof("Skipping resource because it's excluded")
return nil
}

if cohabitator, found := rb.cohabitatingResources[resource.Name]; found {
if cohabitator, found := r.cohabitatingResources[resource.Name]; found {
if cohabitator.seen {
log.WithFields(
logrus.Fields{
@@ -157,28 +141,19 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
cohabitator.seen = true
}

itemBackupper := rb.itemBackupperFactory.newItemBackupper(
rb.backupRequest,
rb.podCommandExecutor,
rb.tarWriter,
rb.dynamicFactory,
rb.discoveryHelper,
rb.resticBackupper,
rb.resticSnapshotTracker,
rb.volumeSnapshotterGetter,
)
itemBackupper := r.newItemBackupper()

namespacesToList := getNamespacesToList(rb.backupRequest.NamespaceIncludesExcludes)
namespacesToList := getNamespacesToList(r.backupRequest.NamespaceIncludesExcludes)

// Check if we're backing up namespaces, and only certain ones
if gr == kuberesource.Namespaces && namespacesToList[0] != "" {
resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
log.WithError(err).Error("Error getting dynamic client")
} else {
var labelSelector labels.Selector
if rb.backupRequest.Spec.LabelSelector != nil {
labelSelector, err = metav1.LabelSelectorAsSelector(rb.backupRequest.Spec.LabelSelector)
if r.backupRequest.Spec.LabelSelector != nil {
labelSelector, err = metav1.LabelSelectorAsSelector(r.backupRequest.Spec.LabelSelector)
if err != nil {
// This should never happen...
return errors.Wrap(err, "invalid label selector")
@@ -218,14 +193,14 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
for _, namespace := range namespacesToList {
log = log.WithField("namespace", namespace)

resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
if err != nil {
log.WithError(err).Error("Error getting dynamic client")
continue
}

var labelSelector string
if selector := rb.backupRequest.Spec.LabelSelector; selector != nil {
if selector := r.backupRequest.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}

@@ -239,7 +214,7 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList

// do the backup
for _, item := range unstructuredList.Items {
if rb.backupItem(log, gr, itemBackupper, &item, preferredGVR) {
if r.backupItem(log, gr, itemBackupper, &item, preferredGVR) {
backedUpItem = true
}
}
@@ -248,14 +223,14 @@ func (rb *defaultResourceBackupper) backupResource(group *metav1.APIResourceList
// back up CRD for resource if found. We should only need to do this if we've backed up at least
// one item and IncludeClusterResources is nil. If IncludeClusterResources is false
// we don't want to back it up, and if it's true it will already be included.
if backedUpItem && rb.backupRequest.Spec.IncludeClusterResources == nil {
rb.backupCRD(log, gr, itemBackupper)
if backedUpItem && r.backupRequest.Spec.IncludeClusterResources == nil {
r.backupCRD(log, gr, itemBackupper)
}

return nil
}
func (rb *defaultResourceBackupper) backupItem(
func (r *resourceBackupper) backupItem(
log logrus.FieldLogger,
gr schema.GroupResource,
itemBackupper ItemBackupper,
@@ -273,7 +248,7 @@ func (rb *defaultResourceBackupper) backupItem(
"name": metadata.GetName(),
})

if gr == kuberesource.Namespaces && !rb.backupRequest.NamespaceIncludesExcludes.ShouldInclude(metadata.GetName()) {
if gr == kuberesource.Namespaces && !r.backupRequest.NamespaceIncludesExcludes.ShouldInclude(metadata.GetName()) {
log.Info("Skipping namespace because it's excluded")
return false
}
@@ -298,11 +273,11 @@ func (rb *defaultResourceBackupper) backupItem(

// backupCRD checks if the resource is a custom resource, and if so, backs up the custom resource definition
// associated with it.
func (rb *defaultResourceBackupper) backupCRD(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper ItemBackupper) {
func (r *resourceBackupper) backupCRD(log logrus.FieldLogger, gr schema.GroupResource, itemBackupper ItemBackupper) {
crdGroupResource := kuberesource.CustomResourceDefinitions

log.Debugf("Getting server preferred API version for %s", crdGroupResource)
gvr, apiResource, err := rb.discoveryHelper.ResourceFor(crdGroupResource.WithVersion(""))
gvr, apiResource, err := r.discoveryHelper.ResourceFor(crdGroupResource.WithVersion(""))
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Error getting resolved resource for %s", crdGroupResource)
return
@@ -310,7 +285,7 @@ func (rb *defaultResourceBackupper) backupCRD(log logrus.FieldLogger, gr schema.
log.Debugf("Got server preferred API version %s for %s", gvr.Version, crdGroupResource)

log.Debugf("Getting dynamic client for %s", gvr.String())
crdClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), apiResource, "")
crdClient, err := r.dynamicFactory.ClientForGroupVersionResource(gvr.GroupVersion(), apiResource, "")
if err != nil {
log.WithError(errors.WithStack(err)).Errorf("Error getting dynamic client for %s", crdGroupResource)
return
@@ -331,7 +306,40 @@ func (rb *defaultResourceBackupper) backupCRD(log logrus.FieldLogger, gr schema.
}
log.Infof("Found associated CRD %s to add to backup", gr.String())

rb.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr)
r.backupItem(log, gvr.GroupResource(), itemBackupper, unstructured, gvr)
}

// sortCoreGroup sorts the core API group.
func sortCoreGroup(group *metav1.APIResourceList) {
sort.SliceStable(group.APIResources, func(i, j int) bool {
return coreGroupResourcePriority(group.APIResources[i].Name) < coreGroupResourcePriority(group.APIResources[j].Name)
})
}

// These constants represent the relative priorities for resources in the core API group. We want to
// ensure that we process pods, then pvcs, then pvs, then anything else. This ensures that when a
// pod is backed up, we can perform a pre hook, then process pvcs and pvs (including taking a
// snapshot), then perform a post hook on the pod.
const (
pod = iota
pvc
pv
other
)

// coreGroupResourcePriority returns the relative priority of the resource, in the following order:
// pods, pvcs, pvs, everything else.
func coreGroupResourcePriority(resource string) int {
switch strings.ToLower(resource) {
case "pods":
return pod
case "persistentvolumeclaims":
return pvc
case "persistentvolumes":
return pv
}

return other
}

// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
@@ -1,5 +1,5 @@
/*
Copyright 2017, 2019 the Velero contributors.
Copyright 2017, 2019, 2020 the Velero contributors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.