Mirror of https://github.com/vmware-tanzu/velero.git, synced 2026-01-04 12:14:00 +00:00
Consolidate commonly used GroupResource objects
Signed-off-by: Nolan Brubaker <nolan@heptio.com>
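This commit replaces the per-file GroupResource variables scattered across the backup and restore packages (pvGroupResource, pvcGroupResource, podsGroupResource, namespacesGroupResource, jobsGroupResource, crbGroupResource, crGroupResource) with a single set of exported values in a new pkg/kuberesource package. The pattern works because schema.GroupResource is a plain two-field struct and therefore comparable, so call sites can keep using == (and switch cases) against the shared values. Below is a minimal sketch of that pattern, not taken from the commit itself; the describe helper and the main function are hypothetical:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// Shared definitions, mirroring the new pkg/kuberesource package:
// one exported value per commonly used GroupResource.
var (
	Namespaces        = schema.GroupResource{Group: "", Resource: "namespaces"}
	PersistentVolumes = schema.GroupResource{Group: "", Resource: "persistentvolumes"}
)

// describe shows the call-site pattern the commit standardizes on:
// compare the struct values directly instead of re-declaring them locally.
func describe(gr schema.GroupResource) string {
	switch gr {
	case Namespaces:
		return "namespace (cluster-scoped)"
	case PersistentVolumes:
		return "persistent volume"
	default:
		return gr.String()
	}
}

func main() {
	fmt.Println(describe(schema.GroupResource{Group: "", Resource: "namespaces"}))
}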
@@ -20,9 +20,9 @@ import (
"github.com/sirupsen/logrus"

"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/heptio/ark/pkg/apis/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
)
@@ -36,8 +36,6 @@ func NewBackupPVAction(log logrus.FieldLogger) ItemAction {
return &backupPVAction{log: log}
}

-var pvGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumes"}
-
func (a *backupPVAction) AppliesTo() (ResourceSelector, error) {
return ResourceSelector{
IncludedResources: []string{"persistentvolumeclaims"},
@@ -63,7 +61,7 @@ func (a *backupPVAction) Execute(item runtime.Unstructured, backup *v1.Backup) (
}

additionalItems = append(additionalItems, ResourceIdentifier{
-GroupResource: pvGroupResource,
+GroupResource: kuberesource.PersistentVolumes,
Name: volumeName,
})
@@ -20,6 +20,7 @@ import (
"testing"

"github.com/heptio/ark/pkg/apis/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -56,5 +57,5 @@ func TestBackupPVAction(t *testing.T) {
_, additional, err = a.Execute(pvc, backup)
require.NoError(t, err)
require.Len(t, additional, 1)
-assert.Equal(t, ResourceIdentifier{GroupResource: pvGroupResource, Name: "myVolume"}, additional[0])
+assert.Equal(t, ResourceIdentifier{GroupResource: kuberesource.PersistentVolumes, Name: "myVolume"}, additional[0])
}
@@ -25,6 +25,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"

+"github.com/heptio/ark/pkg/kuberesource"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@@ -110,9 +111,6 @@ type defaultItemBackupper struct {
additionalItemBackupper ItemBackupper
}

-var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
-var namespacesGroupResource = schema.GroupResource{Group: "", Resource: "namespaces"}
-
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtime.Unstructured, groupResource schema.GroupResource) error {
@@ -138,7 +136,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim

// NOTE: we specifically allow namespaces to be backed up even if IncludeClusterResources is
// false.
-if namespace == "" && groupResource != namespacesGroupResource && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
+if namespace == "" && groupResource != kuberesource.Namespaces && ib.backup.Spec.IncludeClusterResources != nil && !*ib.backup.Spec.IncludeClusterResources {
log.Info("Excluding item because resource is cluster-scoped and backup.spec.includeClusterResources is false")
return nil
}
@@ -220,7 +218,7 @@ func (ib *defaultItemBackupper) backupItem(logger logrus.FieldLogger, obj runtim
}
}

-if groupResource == pvGroupResource {
+if groupResource == kuberesource.PersistentVolumes {
if ib.snapshotService == nil {
log.Debug("Skipping Persistent Volume snapshot because they're not enabled.")
} else {
@@ -22,6 +22,7 @@ import (
"time"

api "github.com/heptio/ark/pkg/apis/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -67,7 +68,7 @@ func (h *defaultItemHookHandler) handleHooks(
phase hookPhase,
) error {
// We only support hooks on pods right now
-if groupResource != podsGroupResource {
+if groupResource != kuberesource.Pods {
return nil
}
@@ -117,7 +118,7 @@ func (h *defaultItemHookHandler) handleHooks(
hooks = resourceHook.post
}
for _, hook := range hooks {
-if groupResource == podsGroupResource {
+if groupResource == kuberesource.Pods {
if hook.Exec != nil {
hookLog := log.WithFields(
logrus.Fields{
@@ -22,9 +22,9 @@ import (

"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/runtime/schema"

"github.com/heptio/ark/pkg/apis/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
)
@@ -38,8 +38,6 @@ func NewPodAction(log logrus.FieldLogger) ItemAction {
return &podAction{log: log}
}

-var pvcGroupResource = schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"}
-
// AppliesTo returns a ResourceSelector that applies only to pods.
func (a *podAction) AppliesTo() (ResourceSelector, error) {
return ResourceSelector{
@@ -92,7 +90,7 @@ func (a *podAction) Execute(item runtime.Unstructured, backup *v1.Backup) (runti
a.log.Infof("Adding pvc %s to additionalItems", claimName)

additionalItems = append(additionalItems, ResourceIdentifier{
-GroupResource: pvcGroupResource,
+GroupResource: kuberesource.PersistentVolumeClaims,
Namespace: metadata.GetNamespace(),
Name: claimName,
})
@@ -19,6 +19,7 @@ package backup
import (
"testing"

+"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -108,8 +109,8 @@ func TestPodActionExecute(t *testing.T) {
}
`),
expected: []ResourceIdentifier{
-{GroupResource: pvcGroupResource, Namespace: "foo", Name: "claim1"},
-{GroupResource: pvcGroupResource, Namespace: "foo", Name: "claim2"},
+{GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "foo", Name: "claim1"},
+{GroupResource: kuberesource.PersistentVolumeClaims, Namespace: "foo", Name: "claim2"},
},
},
}
@@ -21,6 +21,7 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@@ -132,7 +133,7 @@ func (rb *defaultResourceBackupper) backupResource(

// If the resource we are backing up is NOT namespaces, and it is cluster-scoped, check to see if
// we should include it based on the IncludeClusterResources setting.
-if gr != namespacesGroupResource && clusterScoped {
+if gr != kuberesource.Namespaces && clusterScoped {
if rb.backup.Spec.IncludeClusterResources == nil {
if !rb.namespaces.IncludeEverything() {
// when IncludeClusterResources == nil (auto), only directly
@@ -186,7 +187,7 @@ func (rb *defaultResourceBackupper) backupResource(
namespacesToList := getNamespacesToList(rb.namespaces)

// Check if we're backing up namespaces, and only certain ones
-if gr == namespacesGroupResource && namespacesToList[0] != "" {
+if gr == kuberesource.Namespaces && namespacesToList[0] != "" {
resourceClient, err := rb.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
return err
@@ -260,7 +261,7 @@ func (rb *defaultResourceBackupper) backupResource(
continue
}

-if gr == namespacesGroupResource && !rb.namespaces.ShouldInclude(metadata.GetName()) {
+if gr == kuberesource.Namespaces && !rb.namespaces.ShouldInclude(metadata.GetName()) {
log.WithField("name", metadata.GetName()).Info("skipping namespace because it is excluded")
continue
}
@@ -23,6 +23,7 @@ import (
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/collections"
arktest "github.com/heptio/ark/pkg/util/test"
"github.com/stretchr/testify/assert"
@@ -645,7 +646,7 @@ func TestBackupResourceListAllNamespacesExcludesCorrectly(t *testing.T) {
}
client.On("List", metav1.ListOptions{LabelSelector: labelSelector}).Return(list, nil)

-itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, namespacesGroupResource).Return(nil)
+itemBackupper.On("backupItem", mock.AnythingOfType("*logrus.Entry"), ns2, kuberesource.Namespaces).Return(nil)

err := rb.backupResource(v1Group, namespacesResource)
require.NoError(t, err)
@@ -24,11 +24,11 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
-"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1"

"github.com/heptio/ark/pkg/apis/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
)

// serviceAccountAction implements ItemAction.
@@ -57,11 +57,6 @@ func (a *serviceAccountAction) AppliesTo() (ResourceSelector, error) {
}, nil
}

-var (
-crbGroupResource = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterrolebindings"}
-crGroupResource = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterroles"}
-)
-
// Execute checks for any ClusterRoleBindings that have this service account as a subject, and
// adds the ClusterRoleBinding and associated ClusterRole to the list of additional items to
// be backed up.
@@ -97,14 +92,14 @@ func (a *serviceAccountAction) Execute(item runtime.Unstructured, backup *v1.Bac
var additionalItems []ResourceIdentifier
for binding := range bindings {
additionalItems = append(additionalItems, ResourceIdentifier{
-GroupResource: crbGroupResource,
+GroupResource: kuberesource.ClusterRoleBindings,
Name: binding,
})
}

for role := range roles {
additionalItems = append(additionalItems, ResourceIdentifier{
-GroupResource: crGroupResource,
+GroupResource: kuberesource.ClusterRoles,
Name: role,
})
}
@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/runtime"
rbacclient "k8s.io/client-go/kubernetes/typed/rbac/v1"

+"github.com/heptio/ark/pkg/kuberesource"
arktest "github.com/heptio/ark/pkg/util/test"
)
@@ -207,27 +208,27 @@ func TestServiceAccountActionExecute(t *testing.T) {
},
expectedAdditionalItems: []ResourceIdentifier{
{
-GroupResource: crbGroupResource,
+GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-2",
},
{
-GroupResource: crbGroupResource,
+GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-3",
},
{
-GroupResource: crbGroupResource,
+GroupResource: kuberesource.ClusterRoleBindings,
Name: "crb-4",
},
{
-GroupResource: crGroupResource,
+GroupResource: kuberesource.ClusterRoles,
Name: "role-2",
},
{
-GroupResource: crGroupResource,
+GroupResource: kuberesource.ClusterRoles,
Name: "role-3",
},
{
-GroupResource: crGroupResource,
+GroupResource: kuberesource.ClusterRoles,
Name: "role-4",
},
},
pkg/kuberesource/kuberesource.go (new file, 31 lines)
@@ -0,0 +1,31 @@
+/*
+Copyright 2018 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kuberesource
+
+import (
+"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+var (
+ClusterRoleBindings = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterrolebindings"}
+ClusterRoles = schema.GroupResource{Group: "rbac.authorization.k8s.io", Resource: "clusterroles"}
+Jobs = schema.GroupResource{Group: "batch", Resource: "jobs"}
+Namespaces = schema.GroupResource{Group: "", Resource: "namespaces"}
+PersistentVolumeClaims = schema.GroupResource{Group: "", Resource: "persistentvolumeclaims"}
+PersistentVolumes = schema.GroupResource{Group: "", Resource: "persistentvolumes"}
+Pods = schema.GroupResource{Group: "", Resource: "pods"}
+)
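The new package uses package-level var declarations rather than const because Go does not allow constants of struct types; callers are expected to treat the values as read-only. One quick way to see that the swap is behavior-preserving is that each shared value compares equal to the literal it replaces at the old call sites. A hypothetical sanity check, not part of the commit:

package kuberesource

import (
	"testing"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// TestValuesMatchLiterals is a hypothetical check that each shared value is
// identical to the literal previously declared at individual call sites.
func TestValuesMatchLiterals(t *testing.T) {
	if Pods != (schema.GroupResource{Group: "", Resource: "pods"}) {
		t.Errorf("unexpected Pods value: %v", Pods)
	}
	if Namespaces != (schema.GroupResource{Group: "", Resource: "namespaces"}) {
		t.Errorf("unexpected Namespaces value: %v", Namespaces)
	}
	if Jobs != (schema.GroupResource{Group: "batch", Resource: "jobs"}) {
		t.Errorf("unexpected Jobs value: %v", Jobs)
	}
}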
@@ -46,6 +46,7 @@ import (
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/discovery"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/versioned/typed/ark/v1"
+"github.com/heptio/ark/pkg/kuberesource"
"github.com/heptio/ark/pkg/util/boolptr"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/kube"
@@ -344,7 +345,7 @@ func (ctx *context) restoreFromDir(dir string) (api.RestoreResult, api.RestoreRe
for _, resource := range ctx.prioritizedResources {
// we don't want to explicitly restore namespace API objs because we'll handle
// them as a special case prior to restoring anything into them
-if resource.Group == "" && resource.Resource == "namespaces" {
+if resource == kuberesource.Namespaces {
continue
}
@@ -587,7 +588,7 @@ func (ctx *context) restoreResource(resource, namespace, resourcePath string) (a
}
}

-if groupResource.Group == "" && groupResource.Resource == "persistentvolumes" {
+if groupResource == kuberesource.PersistentVolumes {
// restore the PV from snapshot (if applicable)
updatedObj, err := ctx.executePVAction(obj)
if err != nil {
@@ -823,16 +824,11 @@ func hasControllerOwner(refs []metav1.OwnerReference) bool {
return false
}

-// TODO(0.9): These are copied from backup/item_backupper. Definititions should be moved
-// to a shared package and imported here and in the backup package.
-var podsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
-var jobsGroupResource = schema.GroupResource{Group: "batch", Resource: "jobs"}
-
// isCompleted returns whether or not an object is considered completed.
// Used to identify whether or not an object should be restored. Only Jobs or Pods are considered
func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResource) (bool, error) {
switch groupResource {
-case podsGroupResource:
+case kuberesource.Pods:
phase, _, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "phase")
if err != nil {
return false, errors.WithStack(err)
@@ -841,7 +837,7 @@ func isCompleted(obj *unstructured.Unstructured, groupResource schema.GroupResou
return true, nil
}

-case jobsGroupResource:
+case kuberesource.Jobs:
ct, found, err := unstructured.NestedString(obj.UnstructuredContent(), "status", "completionTime")
if err != nil {
return false, errors.WithStack(err)
@@ -18,7 +18,6 @@ package restore

import (
"encoding/json"
-"fmt"
"io"
"os"
"testing"
@@ -736,7 +735,7 @@ func TestIsCompleted(t *testing.T) {
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
-u := unstructuredOrDie(fmt.Sprintf(test.content))
+u := unstructuredOrDie(test.content)
backup, err := isCompleted(u, test.groupResource)

if assert.Equal(t, test.expectedErr, err != nil) {
@@ -1,25 +0,0 @@
-/*
-Copyright 2018 the Heptio Ark contributors.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package kube
-
-import (
-"k8s.io/apimachinery/pkg/runtime/schema"
-)
-
-var PodsGroupResource = schema.GroupResource{Group: "", Resource: "pods"}
-var JobsGroupResource = schema.GroupResource{Group: "batch", Resource: "jobs"}
-var NamespacesGroupResource = schema.GroupResource{Group: "", Resource: "namespaces"}