Merge pull request #7697 from blackpiglet/backup_volumeinfo_cli_update

Modify namespace filter logic for backup with label selector
Xun Jiang/Bruce Jiang
2024-04-22 15:57:50 +08:00
committed by GitHub
20 changed files with 615 additions and 156 deletions

View File

@@ -7,8 +7,16 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Check out the code
uses: actions/checkout@v2
- name: Set up Go
uses: actions/setup-go@v5
with:
go-version: '1.22'
- name: Linter check
run: make lint
- name: Check out the code
uses: actions/checkout@v4
- name: golangci-lint
uses: golangci/golangci-lint-action@v4
with:
version: v1.57.2
args: --out-format=colored-line-number

View File

@@ -12,14 +12,6 @@ run:
# exit code when at least one issue was found, default is 1
issues-exit-code: 1
# which dirs to skip: issues from them won't be reported;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but default dirs are skipped independently
# from this option's value (see skip-dirs-use-default).
# "/" will be replaced by current OS file path separator to properly work
# on Windows.
skip-dirs:
- pkg/plugin/generated/*
# default is true. Enables skipping of directories:
# vendor$, third_party$, testdata$, examples$, Godeps$, builtin$
@@ -44,7 +36,7 @@ run:
# output configuration options
output:
# colored-line-number|line-number|json|tab|checkstyle|code-climate, default is "colored-line-number"
format: colored-line-number
formats: colored-line-number
# print lines of code with issue, default is true
print-issued-lines: true
@@ -148,10 +140,8 @@ linters-settings:
# minimal confidence for issues, default is 0.8
min-confidence: 0.8
gomnd:
settings:
mnd:
# the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description.
checks: argument,case,condition,operation,return,assign
# the list of enabled checks, see https://github.com/tommy-muehle/go-mnd/#checks for description.
checks: argument,case,condition,operation,return,assign
gomodguard:
allowed:
modules: # List of allowed modules
@@ -389,6 +379,15 @@ issues:
# Show only new issues created after git revision `REV`
# new-from-rev: origin/main
# which dirs to skip: issues from them won't be reported;
# can use regexp here: generated.*, regexp is applied on full path;
# default value is empty list, but default dirs are skipped independently
# from this option's value (see skip-dirs-use-default).
# "/" will be replaced by current OS file path separator to properly work
# on Windows.
exclude-dirs:
- pkg/plugin/generated/*
severity:
# Default value is empty string.
# Set the default severity for issues. If severity rules are defined and the issues

View File

@@ -0,0 +1 @@
When IncludedNamespaces/ExcludedNamespaces are omitted and a LabelSelector or OrLabelSelector is used, namespaces that contain no selected items are excluded from the backup.
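For illustration, here is a minimal, hypothetical sketch of the rule above using the upstream apimachinery label packages; the namespace names and the hasSelectedItem map are invented for the example and are not part of this change:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// The backup sets only a LabelSelector; IncludedNamespaces and ExcludedNamespaces are omitted.
	sel, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{
		MatchLabels: map[string]string{"a": "b"},
	})
	if err != nil {
		panic(err)
	}

	// Hypothetical cluster state: ns-1 holds a Deployment labeled a=b, ns-2 holds nothing that matches.
	hasSelectedItem := map[string]bool{"ns-1": true, "ns-2": false}
	nsLabels := map[string]labels.Set{"ns-1": {}, "ns-2": {}}

	for _, ns := range []string{"ns-1", "ns-2"} {
		// A namespace is backed up if it contains selected items or its own labels match the selector.
		keep := hasSelectedItem[ns] || sel.Matches(nsLabels[ns])
		fmt.Printf("%s backed up: %v\n", ns, keep) // ns-1: true, ns-2: false
	}
}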

View File

@@ -93,7 +93,7 @@ RUN ARCH=$(go env GOARCH) && \
chmod +x /usr/bin/goreleaser
# get golangci-lint
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.55.0
RUN curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s -- -b $(go env GOPATH)/bin v1.57.2
# install kubectl
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/$(go env GOARCH)/kubectl

View File

@@ -14,14 +14,12 @@
# See the License for the specific language governing permissions and
# limitations under the License.
HACK_DIR=$(dirname "${BASH_SOURCE[0]}")
# Printing out cache status
golangci-lint cache status
# Enable GL_DEBUG line below for debug messages for golangci-lint
# export GL_DEBUG=loader,gocritic,env
CMD="golangci-lint run -c $HACK_DIR/../golangci.yaml"
CMD="golangci-lint run"
echo "Running $CMD"
eval $CMD

View File

@@ -1976,7 +1976,7 @@ func TestValidateContainer(t *testing.T) {
expectedError := fmt.Errorf("invalid InitContainer in restore hook, it doesn't have Command, Name or Image field")
// valid string should return nil as result.
assert.Equal(t, nil, ValidateContainer([]byte(valid)))
assert.Nil(t, ValidateContainer([]byte(valid)))
// noName string should return expected error as result.
assert.Equal(t, expectedError, ValidateContainer([]byte(noName)))

View File

@@ -4327,10 +4327,6 @@ func TestBackupNamespaces(t *testing.T) {
"resources/namespaces/v1-preferredversion/cluster/ns-1.json",
"resources/deployments.apps/namespaces/ns-1/deploy-1.json",
"resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json",
"resources/namespaces/cluster/ns-2.json",
"resources/namespaces/v1-preferredversion/cluster/ns-2.json",
"resources/namespaces/cluster/ns-3.json",
"resources/namespaces/v1-preferredversion/cluster/ns-3.json",
},
},
{
@@ -4338,8 +4334,7 @@ func TestBackupNamespaces(t *testing.T) {
backup: defaultBackup().OrLabelSelector([]*metav1.LabelSelector{
{MatchLabels: map[string]string{"a": "b"}},
{MatchLabels: map[string]string{"c": "d"}},
}).
Result(),
}).Result(),
apiResources: []*test.APIResource{
test.Namespaces(
builder.ForNamespace("ns-1").Result(),
@@ -4356,8 +4351,6 @@ func TestBackupNamespaces(t *testing.T) {
"resources/namespaces/v1-preferredversion/cluster/ns-1.json",
"resources/namespaces/cluster/ns-2.json",
"resources/namespaces/v1-preferredversion/cluster/ns-2.json",
"resources/namespaces/cluster/ns-3.json",
"resources/namespaces/v1-preferredversion/cluster/ns-3.json",
"resources/deployments.apps/namespaces/ns-1/deploy-1.json",
"resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json",
"resources/deployments.apps/namespaces/ns-2/deploy-2.json",
@@ -4385,6 +4378,27 @@ func TestBackupNamespaces(t *testing.T) {
"resources/deployments.apps/v1-preferredversion/namespaces/ns-1/deploy-1.json",
},
},
{
name: "LabelSelector and Namespace exclude filtering test",
backup: defaultBackup().ExcludedNamespaces("ns-1", "ns-2").LabelSelector(&metav1.LabelSelector{MatchLabels: map[string]string{"a": "b"}}).
Result(),
apiResources: []*test.APIResource{
test.Namespaces(
builder.ForNamespace("ns-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),
builder.ForNamespace("ns-2").Result(),
builder.ForNamespace("ns-3").Result(),
),
test.Deployments(
builder.ForDeployment("ns-1", "deploy-1").ObjectMeta(builder.WithLabels("a", "b")).Result(),
),
},
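// deploy-1 is not collected because ns-1 is in ExcludedNamespaces; the
// ns-1 namespace object itself is still backed up because its labels
// match the LabelSelector, and ns-3 is kept by the namespace filter.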
want: []string{
"resources/namespaces/cluster/ns-1.json",
"resources/namespaces/v1-preferredversion/cluster/ns-1.json",
"resources/namespaces/cluster/ns-3.json",
"resources/namespaces/v1-preferredversion/cluster/ns-3.json",
},
},
{
name: "Empty namespace test",
backup: defaultBackup().IncludedNamespaces("invalid*").Result(),

View File

@@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/tools/pager"
@@ -50,6 +51,127 @@ type itemCollector struct {
cohabitatingResources map[string]*cohabitatingResource
dir string
pageSize int
nsTracker nsTracker
}
// nsTracker integrates several namespace filters:
// 1. the backup's namespace Include/Exclude filters;
// 2. namespaces selected by the backup's (Or)LabelSelector;
// 3. the namespaces of resources selected by the backup's (Or)LabelSelector.
//
// Rules:
//
// a. When the backup's namespace Include/Exclude filters match everything,
// namespaces that contain no resources selected for backup are not
// collected.
//
// b. If the namespace I/E filters and the (Or)LabelSelectors select
// different namespaces, the tracker takes the union of the two sets.
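//
// Illustrative example (mirroring the "LabelSelector and Namespace exclude
// filtering test" case added to backup_test.go in this change): with
// ExcludedNamespaces=["ns-1","ns-2"] and LabelSelector a=b, namespace ns-1
// labeled a=b is still tracked because the selector matches it, ns-2 is
// dropped, and ns-3 is tracked via the namespace Include/Exclude filter.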
type nsTracker struct {
singleLabelSelector labels.Selector
orLabelSelector []labels.Selector
namespaceFilter *collections.IncludesExcludes
logger logrus.FieldLogger
namespaceMap map[string]bool
}
// track adds the namespace to the namespaceMap.
func (nt *nsTracker) track(ns string) {
if nt.namespaceMap == nil {
nt.namespaceMap = make(map[string]bool)
}
if _, ok := nt.namespaceMap[ns]; !ok {
nt.namespaceMap[ns] = true
}
}
// isTracked checks whether the namespace's name exists in
// the namespaceMap.
func (nt *nsTracker) isTracked(ns string) bool {
if nt.namespaceMap != nil {
return nt.namespaceMap[ns]
}
return false
}
// init initializes the namespaceMap, adding elements according to
// the namespace include/exclude filters and the backup label selectors.
func (nt *nsTracker) init(
unstructuredNSs []unstructured.Unstructured,
singleLabelSelector labels.Selector,
orLabelSelector []labels.Selector,
namespaceFilter *collections.IncludesExcludes,
logger logrus.FieldLogger,
) {
if nt.namespaceMap == nil {
nt.namespaceMap = make(map[string]bool)
}
nt.singleLabelSelector = singleLabelSelector
nt.orLabelSelector = orLabelSelector
nt.namespaceFilter = namespaceFilter
nt.logger = logger
for _, namespace := range unstructuredNSs {
if nt.singleLabelSelector != nil &&
nt.singleLabelSelector.Matches(labels.Set(namespace.GetLabels())) {
nt.logger.Debugf(`Track namespace %s,
because its labels match the backup LabelSelector.`,
namespace.GetName(),
)
nt.track(namespace.GetName())
continue
}
if len(nt.orLabelSelector) > 0 {
for _, selector := range nt.orLabelSelector {
if selector.Matches(labels.Set(namespace.GetLabels())) {
nt.logger.Debugf(`Track namespace %s,
because its labels match the backup OrLabelSelector.`,
namespace.GetName(),
)
nt.track(namespace.GetName())
continue
}
}
}
// Skip tracking the namespace when the backup's namespace filter has the
// default value (include everything) and the namespace doesn't match the
// backup LabelSelector or OrLabelSelector.
// https://github.com/vmware-tanzu/velero/issues/7105
if nt.namespaceFilter.IncludeEverything() &&
(nt.singleLabelSelector != nil || len(nt.orLabelSelector) > 0) {
continue
}
if nt.namespaceFilter.ShouldInclude(namespace.GetName()) {
nt.logger.Debugf(`Track namespace %s,
because its name matches the backup namespace filter.`,
namespace.GetName(),
)
nt.track(namespace.GetName())
}
}
}
// filterNamespaces filters the input resource list to remove the
// namespaces not tracked by the nsTracker.
func (nt *nsTracker) filterNamespaces(
resources []*kubernetesResource,
) []*kubernetesResource {
result := make([]*kubernetesResource, 0)
for _, resource := range resources {
if resource.groupResource != kuberesource.Namespaces ||
nt.isTracked(resource.name) {
result = append(result, resource)
}
}
return result
}
type kubernetesResource struct {
@@ -58,35 +180,47 @@ type kubernetesResource struct {
namespace, name, path string
}
// getItemsFromResourceIdentifiers converts ResourceIdentifiers to
// kubernetesResources
func (r *itemCollector) getItemsFromResourceIdentifiers(resourceIDs []velero.ResourceIdentifier) []*kubernetesResource {
// getItemsFromResourceIdentifiers gets the kubernetesResources
// specified by the input parameter resourceIDs.
func (r *itemCollector) getItemsFromResourceIdentifiers(
resourceIDs []velero.ResourceIdentifier,
) []*kubernetesResource {
grResourceIDsMap := make(map[schema.GroupResource][]velero.ResourceIdentifier)
for _, resourceID := range resourceIDs {
grResourceIDsMap[resourceID.GroupResource] = append(grResourceIDsMap[resourceID.GroupResource], resourceID)
grResourceIDsMap[resourceID.GroupResource] = append(
grResourceIDsMap[resourceID.GroupResource], resourceID)
}
return r.getItems(grResourceIDsMap)
}
// getAllItems gets all relevant items from all API groups.
// getAllItems gets all backup-relevant items from all API groups.
func (r *itemCollector) getAllItems() []*kubernetesResource {
return r.getItems(nil)
resources := r.getItems(nil)
return r.nsTracker.filterNamespaces(resources)
}
// getItems gets all relevant items from all API groups.
// getItems gets all backup-relevant items from all API groups.
//
// If resourceIDsMap is nil, then all items from the cluster are
// pulled for each API group, subject to include/exclude rules.
// pulled for each API group, subject to include/exclude rules,
// except for namespaces, because namespace filtering depends on all the
// collected namespace-scoped resources.
//
// If resourceIDsMap is supplied, then only those resources are
// returned, with the appropriate APIGroup information filled in. In
// this case, include/exclude rules are not invoked, since we already
// have the list of items, we just need the item collector/discovery
// helper to fill in the missing GVR, etc. context.
func (r *itemCollector) getItems(resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier) []*kubernetesResource {
func (r *itemCollector) getItems(
resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier,
) []*kubernetesResource {
var resources []*kubernetesResource
for _, group := range r.discoveryHelper.Resources() {
groupItems, err := r.getGroupItems(r.log, group, resourceIDsMap)
if err != nil {
r.log.WithError(err).WithField("apiGroup", group.String()).Error("Error collecting resources from API group")
r.log.WithError(err).WithField("apiGroup", group.String()).
Error("Error collecting resources from API group")
continue
}
@@ -99,7 +233,11 @@ func (r *itemCollector) getItems(resourceIDsMap map[schema.GroupResource][]veler
// getGroupItems collects all relevant items from a single API group.
// If resourceIDsMap is supplied, then only those items are returned,
// with GVR/APIResource metadata supplied.
func (r *itemCollector) getGroupItems(log logrus.FieldLogger, group *metav1.APIResourceList, resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier) ([]*kubernetesResource, error) {
func (r *itemCollector) getGroupItems(
log logrus.FieldLogger,
group *metav1.APIResourceList,
resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier,
) ([]*kubernetesResource, error) {
log = log.WithField("group", group.GroupVersion)
log.Infof("Getting items for group")
@@ -107,11 +245,12 @@ func (r *itemCollector) getGroupItems(log logrus.FieldLogger, group *metav1.APIR
// Parse so we can check if this is the core group
gv, err := schema.ParseGroupVersion(group.GroupVersion)
if err != nil {
return nil, errors.Wrapf(err, "error parsing GroupVersion %q", group.GroupVersion)
return nil, errors.Wrapf(err, "error parsing GroupVersion %q",
group.GroupVersion)
}
if gv.Group == "" {
// This is the core group, so make sure we process in the following order: pods, pvcs, pvs,
// everything else.
// This is the core group, so make sure we process in the following order:
// pods, pvcs, pvs, everything else.
sortCoreGroup(group)
}
@@ -119,7 +258,8 @@ func (r *itemCollector) getGroupItems(log logrus.FieldLogger, group *metav1.APIR
for _, resource := range group.APIResources {
resourceItems, err := r.getResourceItems(log, gv, resource, resourceIDsMap)
if err != nil {
log.WithError(err).WithField("resource", resource.String()).Error("Error getting items for resource")
log.WithError(err).WithField("resource", resource.String()).
Error("Error getting items for resource")
continue
}
@@ -129,8 +269,13 @@ func (r *itemCollector) getGroupItems(log logrus.FieldLogger, group *metav1.APIR
return items, nil
}
// sortResourcesByOrder sorts items by the names specified in "order". Items are not in order will be put at the end in original order.
func sortResourcesByOrder(log logrus.FieldLogger, items []*kubernetesResource, order []string) []*kubernetesResource {
// sortResourcesByOrder sorts items by the names specified in "order".
// Items not in "order" are appended at the end in their original order.
func sortResourcesByOrder(
log logrus.FieldLogger,
items []*kubernetesResource,
order []string,
) []*kubernetesResource {
if len(order) == 0 {
return items
}
@@ -175,7 +320,10 @@ func sortResourcesByOrder(log logrus.FieldLogger, items []*kubernetesResource, o
}
// getOrderedResourcesForType gets the resource order for resourceType from orderedResources.
func getOrderedResourcesForType(orderedResources map[string]string, resourceType string) []string {
func getOrderedResourcesForType(
orderedResources map[string]string,
resourceType string,
) []string {
if orderedResources == nil {
return nil
}
@@ -190,7 +338,12 @@ func getOrderedResourcesForType(orderedResources map[string]string, resourceType
// getResourceItems collects all relevant items for a given group-version-resource.
// If resourceIDsMap is supplied, the items will be pulled from here
// rather than from the cluster and applying include/exclude rules.
func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.GroupVersion, resource metav1.APIResource, resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier) ([]*kubernetesResource, error) {
func (r *itemCollector) getResourceItems(
log logrus.FieldLogger,
gv schema.GroupVersion,
resource metav1.APIResource,
resourceIDsMap map[schema.GroupResource][]velero.ResourceIdentifier,
) ([]*kubernetesResource, error) {
log = log.WithField("resource", resource.Name)
log.Info("Getting items for resource")
@@ -200,7 +353,10 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
gr = gvr.GroupResource()
)
orders := getOrderedResourcesForType(r.backupRequest.Backup.Spec.OrderedResources, resource.Name)
orders := getOrderedResourcesForType(
r.backupRequest.Backup.Spec.OrderedResources,
resource.Name,
)
// Getting the preferred group version of this resource
preferredGVR, _, err := r.discoveryHelper.ResourceFor(gr.WithVersion(""))
if err != nil {
@@ -222,7 +378,11 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
"name": resourceID.Name,
},
).Infof("Getting item")
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, resourceID.Namespace)
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(
gv,
resource,
resourceID.Namespace,
)
if err != nil {
log.WithError(errors.WithStack(err)).Error("Error getting client for resource")
continue
@@ -257,7 +417,8 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
}
if cohabitator, found := r.cohabitatingResources[resource.Name]; found {
if gv.Group == cohabitator.groupResource1.Group || gv.Group == cohabitator.groupResource2.Group {
if gv.Group == cohabitator.groupResource1.Group ||
gv.Group == cohabitator.groupResource2.Group {
if cohabitator.seen {
log.WithFields(
logrus.Fields{
@@ -272,23 +433,16 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
}
// Handle namespace resource here.
// Namespace are only filtered by namespace include/exclude filters.
// Label selectors are not checked.
// Namespaces are filtered by the namespace include/exclude filters;
// the backup LabelSelector and OrLabelSelectors are checked too.
if gr == kuberesource.Namespaces {
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
log.WithError(err).Error("Error getting dynamic client")
return nil, errors.WithStack(err)
}
unstructuredList, err := resourceClient.List(metav1.ListOptions{})
if err != nil {
log.WithError(errors.WithStack(err)).Error("error list namespaces")
return nil, errors.WithStack(err)
}
items := r.backupNamespaces(unstructuredList, r.backupRequest.NamespaceIncludesExcludes, gr, preferredGVR, log)
return items, nil
return r.collectNamespaces(
resource,
gv,
gr,
preferredGVR,
log,
)
}
clusterScoped := !resource.Namespaced
@@ -302,57 +456,12 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
var items []*kubernetesResource
for _, namespace := range namespacesToList {
// List items from Kubernetes API
log = log.WithField("namespace", namespace)
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
unstructuredItems, err := r.listResourceByLabelsPerNamespace(
namespace, gr, gv, resource, log)
if err != nil {
log.WithError(err).Error("Error getting dynamic client")
continue
}
var orLabelSelectors []string
if r.backupRequest.Spec.OrLabelSelectors != nil {
for _, s := range r.backupRequest.Spec.OrLabelSelectors {
orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(s))
}
} else {
orLabelSelectors = []string{}
}
log.Info("Listing items")
unstructuredItems := make([]unstructured.Unstructured, 0)
// Listing items for orLabelSelectors
errListingForNS := false
for _, label := range orLabelSelectors {
unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, label, resourceClient)
if err != nil {
errListingForNS = true
}
}
if errListingForNS {
log.WithError(err).Error("Error listing items")
continue
}
var labelSelector string
if selector := r.backupRequest.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}
// Listing items for labelSelector (singular)
if len(orLabelSelectors) == 0 {
unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, labelSelector, resourceClient)
if err != nil {
log.WithError(err).Error("Error listing items")
continue
}
}
log.Infof("Retrieved %d items", len(unstructuredItems))
// Collect items in included Namespaces
for i := range unstructuredItems {
item := &unstructuredItems[i]
@@ -370,8 +479,14 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
name: item.GetName(),
path: path,
})
if item.GetNamespace() != "" {
log.Debugf("Track namespace %s in nsTracker", item.GetNamespace())
r.nsTracker.track(item.GetNamespace())
}
}
}
if len(orders) > 0 {
items = sortResourcesByOrder(r.log, items, orders)
}
@@ -379,6 +494,71 @@ func (r *itemCollector) getResourceItems(log logrus.FieldLogger, gv schema.Group
return items, nil
}
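// listResourceByLabelsPerNamespace lists the given resource in one namespace,
// applying the backup's OrLabelSelectors when they are set, or its single
// LabelSelector (if any) otherwise.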
func (r *itemCollector) listResourceByLabelsPerNamespace(
namespace string,
gr schema.GroupResource,
gv schema.GroupVersion,
resource metav1.APIResource,
logger logrus.FieldLogger,
) ([]unstructured.Unstructured, error) {
// List items from Kubernetes API
logger = logger.WithField("namespace", namespace)
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, namespace)
if err != nil {
logger.WithError(err).Error("Error getting dynamic client")
return nil, err
}
var orLabelSelectors []string
if r.backupRequest.Spec.OrLabelSelectors != nil {
for _, s := range r.backupRequest.Spec.OrLabelSelectors {
orLabelSelectors = append(orLabelSelectors, metav1.FormatLabelSelector(s))
}
} else {
orLabelSelectors = []string{}
}
logger.Info("Listing items")
unstructuredItems := make([]unstructured.Unstructured, 0)
// Listing items for orLabelSelectors
errListingForNS := false
for _, label := range orLabelSelectors {
unstructuredItems, err = r.listItemsForLabel(unstructuredItems, gr, label, resourceClient)
if err != nil {
errListingForNS = true
}
}
if errListingForNS {
logger.WithError(err).Error("Error listing items")
return nil, err
}
var labelSelector string
if selector := r.backupRequest.Spec.LabelSelector; selector != nil {
labelSelector = metav1.FormatLabelSelector(selector)
}
// Listing items for labelSelector (singular)
if len(orLabelSelectors) == 0 {
unstructuredItems, err = r.listItemsForLabel(
unstructuredItems,
gr,
labelSelector,
resourceClient,
)
if err != nil {
logger.WithError(err).Error("Error listing items")
return nil, err
}
}
logger.Infof("Retrieved %d items", len(unstructuredItems))
return unstructuredItems, nil
}
func (r *itemCollector) writeToFile(item *unstructured.Unstructured) (string, error) {
f, err := os.CreateTemp(r.dir, "")
if err != nil {
@@ -475,7 +655,11 @@ func newCohabitatingResource(resource, group1, group2 string) *cohabitatingResou
}
// function to process pager client calls when the pageSize is specified
func (r *itemCollector) processPagerClientCalls(gr schema.GroupResource, label string, resourceClient client.Dynamic) (runtime.Object, error) {
func (r *itemCollector) processPagerClientCalls(
gr schema.GroupResource,
label string,
resourceClient client.Dynamic,
) (runtime.Object, error) {
// If limit is positive, use a pager to split list over multiple requests
// Use Velero's dynamic list function instead of the default
listPager := pager.New(pager.SimplePageFunc(func(opts metav1.ListOptions) (runtime.Object, error) {
@@ -499,7 +683,12 @@ func (r *itemCollector) processPagerClientCalls(gr schema.GroupResource, label s
return list, nil
}
func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstructured, gr schema.GroupResource, label string, resourceClient client.Dynamic) ([]unstructured.Unstructured, error) {
func (r *itemCollector) listItemsForLabel(
unstructuredItems []unstructured.Unstructured,
gr schema.GroupResource,
label string,
resourceClient client.Dynamic,
) ([]unstructured.Unstructured, error) {
if r.pageSize > 0 {
// process pager client calls
list, err := r.processPagerClientCalls(gr, label, resourceClient)
@@ -510,7 +699,8 @@ func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstr
err = meta.EachListItem(list, func(object runtime.Object) error {
u, ok := object.(*unstructured.Unstructured)
if !ok {
r.log.WithError(errors.WithStack(fmt.Errorf("expected *unstructured.Unstructured but got %T", u))).Error("unable to understand entry in the list")
r.log.WithError(errors.WithStack(fmt.Errorf("expected *unstructured.Unstructured but got %T", u))).
Error("unable to understand entry in the list")
return fmt.Errorf("expected *unstructured.Unstructured but got %T", u)
}
unstructuredItems = append(unstructuredItems, *u)
@@ -531,29 +721,74 @@ func (r *itemCollector) listItemsForLabel(unstructuredItems []unstructured.Unstr
return unstructuredItems, nil
}
// backupNamespaces process namespace resource according to namespace filters.
func (r *itemCollector) backupNamespaces(unstructuredList *unstructured.UnstructuredList,
ie *collections.IncludesExcludes, gr schema.GroupResource, preferredGVR schema.GroupVersionResource,
log logrus.FieldLogger) []*kubernetesResource {
var items []*kubernetesResource
for index, unstructured := range unstructuredList.Items {
if ie.ShouldInclude(unstructured.GetName()) {
log.Debugf("Backup namespace %s due to namespace filters setting.", unstructured.GetName())
// collectNamespaces processes namespace resources according to the namespace filters and label selectors.
func (r *itemCollector) collectNamespaces(
resource metav1.APIResource,
gv schema.GroupVersion,
gr schema.GroupResource,
preferredGVR schema.GroupVersionResource,
log logrus.FieldLogger,
) ([]*kubernetesResource, error) {
resourceClient, err := r.dynamicFactory.ClientForGroupVersionResource(gv, resource, "")
if err != nil {
log.WithError(err).Error("Error getting dynamic client")
return nil, errors.WithStack(err)
}
path, err := r.writeToFile(&unstructuredList.Items[index])
unstructuredList, err := resourceClient.List(metav1.ListOptions{})
if err != nil {
log.WithError(errors.WithStack(err)).Error("error list namespaces")
return nil, errors.WithStack(err)
}
var singleSelector labels.Selector
var orSelectors []labels.Selector
if r.backupRequest.Backup.Spec.LabelSelector != nil {
var err error
singleSelector, err = metav1.LabelSelectorAsSelector(
r.backupRequest.Backup.Spec.LabelSelector)
if err != nil {
log.WithError(err).Errorf("Fail to convert backup LabelSelector %s into selector.",
metav1.FormatLabelSelector(r.backupRequest.Backup.Spec.LabelSelector))
}
}
if r.backupRequest.Backup.Spec.OrLabelSelectors != nil {
for _, ls := range r.backupRequest.Backup.Spec.OrLabelSelectors {
orSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
log.WithError(err).Error("Error writing item to file")
continue
log.WithError(err).Errorf("Fail to convert backup OrLabelSelector %s into selector.",
metav1.FormatLabelSelector(ls))
}
items = append(items, &kubernetesResource{
groupResource: gr,
preferredGVR: preferredGVR,
name: unstructured.GetName(),
path: path,
})
orSelectors = append(orSelectors, orSelector)
}
}
return items
r.nsTracker.init(
unstructuredList.Items,
singleSelector,
orSelectors,
r.backupRequest.NamespaceIncludesExcludes,
log,
)
var items []*kubernetesResource
for index := range unstructuredList.Items {
path, err := r.writeToFile(&unstructuredList.Items[index])
if err != nil {
log.WithError(err).Errorf("Error writing item %s to file",
unstructuredList.Items[index].GetName())
continue
}
items = append(items, &kubernetesResource{
groupResource: gr,
preferredGVR: preferredGVR,
name: unstructuredList.Items[index].GetName(),
path: path,
})
}
return items, nil
}
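To summarize the flow wired up above: collectNamespaces tracks namespaces that match the selectors or the include/exclude filter, getResourceItems tracks the namespaces of every selected item, and getAllItems finally drops untracked namespace objects via filterNamespaces. A simplified, self-contained sketch of those three phases follows; the real nsTracker is unexported in package backup, so the type and namespace names here are illustrative only:

package main

import "fmt"

// tracker is a toy stand-in for this PR's nsTracker: a namespace is kept either
// because collectNamespaces tracked it up front (its labels match the backup's
// selectors, or it passes the Include/Exclude filter) or because
// getResourceItems later found a selected item inside it.
type tracker struct{ tracked map[string]bool }

func (t *tracker) track(ns string) {
	if t.tracked == nil {
		t.tracked = map[string]bool{}
	}
	t.tracked[ns] = true
}

func main() {
	var t tracker

	// Phase 1 (collectNamespaces): ns-1's labels match the backup LabelSelector.
	t.track("ns-1")

	// Phase 2 (getResourceItems): a selected Deployment lives in ns-2, so its
	// namespace is tracked as well.
	t.track("ns-2")

	// Phase 3 (getAllItems -> filterNamespaces): untracked namespaces are dropped.
	for _, ns := range []string{"ns-1", "ns-2", "ns-3"} {
		fmt.Printf("%s kept: %v\n", ns, t.tracked[ns]) // ns-3 kept: false
	}
}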

View File

@@ -17,11 +17,23 @@ limitations under the License.
package backup
import (
"os"
"testing"
"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/builder"
"github.com/vmware-tanzu/velero/pkg/kuberesource"
"github.com/vmware-tanzu/velero/pkg/test"
"github.com/vmware-tanzu/velero/pkg/util/collections"
)
func TestSortCoreGroup(t *testing.T) {
@@ -77,3 +89,191 @@ func TestSortOrderedResource(t *testing.T) {
sortedPvResources := sortResourcesByOrder(log, pvResources, pvOrder)
assert.Equal(t, sortedPvResources, expectedPvResources)
}
func TestFilterNamespaces(t *testing.T) {
tests := []struct {
name string
resources []*kubernetesResource
needToTrack string
expectedResources []*kubernetesResource
}{
{
name: "Namespace include by the filter but not in namespacesContainResource",
resources: []*kubernetesResource{
{
groupResource: kuberesource.Namespaces,
preferredGVR: kuberesource.Namespaces.WithVersion("v1"),
name: "ns1",
},
{
groupResource: kuberesource.Namespaces,
preferredGVR: kuberesource.Namespaces.WithVersion("v1"),
name: "ns2",
},
{
groupResource: kuberesource.Pods,
preferredGVR: kuberesource.Namespaces.WithVersion("v1"),
name: "pod1",
},
},
needToTrack: "ns1",
expectedResources: []*kubernetesResource{
{
groupResource: kuberesource.Namespaces,
preferredGVR: kuberesource.Namespaces.WithVersion("v1"),
name: "ns1",
},
{
groupResource: kuberesource.Pods,
preferredGVR: kuberesource.Namespaces.WithVersion("v1"),
name: "pod1",
},
},
},
}
for _, tc := range tests {
t.Run(tc.name, func(*testing.T) {
r := itemCollector{
backupRequest: &Request{},
}
if tc.needToTrack != "" {
r.nsTracker.track(tc.needToTrack)
}
require.Equal(t, tc.expectedResources, r.nsTracker.filterNamespaces(tc.resources))
})
}
}
func TestItemCollectorBackupNamespaces(t *testing.T) {
tests := []struct {
name string
ie *collections.IncludesExcludes
namespaces []*corev1.Namespace
backup *velerov1api.Backup
expectedTrackedNS []string
}{
{
name: "ns filter by namespace IE filter",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewIncludesExcludes().Includes("ns1"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").Result(),
builder.ForNamespace("ns2").Result(),
},
expectedTrackedNS: []string{"ns1"},
},
{
name: "ns filter by backup labelSelector",
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
expectedTrackedNS: []string{"ns1"},
},
{
name: "ns filter by backup orLabelSelector",
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
expectedTrackedNS: []string{"ns1"},
},
{
name: "ns not included by IE filter, but included by labelSelector",
backup: builder.ForBackup("velero", "backup").LabelSelector(&metav1.LabelSelector{
MatchLabels: map[string]string{"name": "ns1"},
}).Result(),
ie: collections.NewIncludesExcludes().Excludes("ns1"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
expectedTrackedNS: []string{"ns1"},
},
{
name: "ns not included by IE filter, but included by orLabelSelector",
backup: builder.ForBackup("velero", "backup").OrLabelSelector([]*metav1.LabelSelector{
{MatchLabels: map[string]string{"name": "ns1"}},
}).Result(),
ie: collections.NewIncludesExcludes().Excludes("ns1", "ns2"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
builder.ForNamespace("ns3").Result(),
},
expectedTrackedNS: []string{"ns1", "ns3"},
},
{
name: "No ns filters",
backup: builder.ForBackup("velero", "backup").Result(),
ie: collections.NewIncludesExcludes().Includes("*"),
namespaces: []*corev1.Namespace{
builder.ForNamespace("ns1").ObjectMeta(builder.WithLabels("name", "ns1")).Result(),
builder.ForNamespace("ns2").Result(),
},
expectedTrackedNS: []string{"ns1", "ns2"},
},
}
for _, tc := range tests {
t.Run(tc.name, func(*testing.T) {
tempDir, err := os.MkdirTemp("", "")
require.NoError(t, err)
var unstructuredNSList unstructured.UnstructuredList
for _, ns := range tc.namespaces {
unstructuredNS, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
require.NoError(t, err)
unstructuredNSList.Items = append(unstructuredNSList.Items,
unstructured.Unstructured{Object: unstructuredNS})
}
dc := &test.FakeDynamicClient{}
dc.On("List", mock.Anything).Return(&unstructuredNSList, nil)
factory := &test.FakeDynamicFactory{}
factory.On(
"ClientForGroupVersionResource",
mock.Anything,
mock.Anything,
mock.Anything,
).Return(dc, nil)
r := itemCollector{
backupRequest: &Request{
Backup: tc.backup,
NamespaceIncludesExcludes: tc.ie,
},
dynamicFactory: factory,
dir: tempDir,
}
r.collectNamespaces(
metav1.APIResource{
Name: "Namespace",
Kind: "Namespace",
Namespaced: false,
},
kuberesource.Namespaces.WithVersion("").GroupVersion(),
kuberesource.Namespaces,
kuberesource.Namespaces.WithVersion(""),
logrus.StandardLogger(),
)
for _, ns := range tc.expectedTrackedNS {
require.True(t, r.nsTracker.isTracked(ns))
}
})
}
}

View File

@@ -79,7 +79,7 @@ func TestConfig(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client, err := Config(test.kubeconfig, test.kubecontext, "velero", test.QPS, test.burst)
assert.Equal(t, err, nil)
assert.Nil(t, err)
assert.Equal(t, test.expectedHost, client.Host)
assert.Equal(t, test.QPS, client.QPS)
assert.Equal(t, test.burst, client.Burst)

View File

@@ -62,13 +62,13 @@ func TestConfigOperations(t *testing.T) {
// Remove config file if it exists
err := removeConfigfileName()
assert.Equal(t, err, nil)
assert.NoError(t, err)
// Test LoadConfig: expect an empty velero config
expectedConfig := VeleroConfig{}
config, err := LoadConfig()
assert.Equal(t, err, nil)
assert.NoError(t, err)
assert.True(t, reflect.DeepEqual(expectedConfig, config))
// Test savedConfig
@@ -84,9 +84,9 @@ func TestConfigOperations(t *testing.T) {
err = SaveConfig(config)
assert.Equal(t, err, nil)
assert.NoError(t, err)
savedConfig, err := LoadConfig()
assert.Equal(t, err, nil)
assert.NoError(t, err)
// Test Features
feature := savedConfig.Features()
@@ -107,7 +107,7 @@ func TestConfigOperations(t *testing.T) {
t.Cleanup(func() {
err = removeConfigfileName()
assert.Equal(t, err, nil)
assert.NoError(t, err)
os.Unsetenv("HOME")
os.Setenv("HOME", preHomeEnv)
})

View File

@@ -95,7 +95,7 @@ func TestFactory(t *testing.T) {
baseName := "velero-bn"
config, err := LoadConfig()
assert.Equal(t, err, nil)
assert.NoError(t, err)
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
f = NewFactory(baseName, config)

View File

@@ -62,15 +62,15 @@ func TestDeleteCommand(t *testing.T) {
args := []string{backup1, backup2}
e := o.Complete(f, args)
require.Equal(t, nil, e)
require.NoError(t, e)
e = o.Validate(c, f, args)
require.Equal(t, nil, e)
require.NoError(t, e)
Run(o)
e = c.Execute()
require.Equal(t, nil, e)
require.NoError(t, e)
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return

View File

@@ -148,7 +148,7 @@ func TestCreateCommand_Run(t *testing.T) {
o.Complete(args, f)
e := o.Validate(c, args, f)
assert.Equal(t, e, nil)
assert.Nil(t, e)
e = o.Run(c, f)
assert.Contains(t, e.Error(), fmt.Sprintf("%s: no such file or directory", caCertFile))

View File

@@ -66,7 +66,7 @@ func TestNewSetCommand(t *testing.T) {
args := []string{backupName}
o.Complete(args, f)
e := o.Validate(c, args, f)
assert.Equal(t, e, nil)
assert.Nil(t, e)
e = o.Run(c, f)
assert.Contains(t, e.Error(), fmt.Sprintf("%s: no such file or directory", cacert))

View File

@@ -62,15 +62,15 @@ func TestDeleteCommand(t *testing.T) {
args := []string{restore1, restore2}
e := o.Complete(f, args)
require.Equal(t, nil, e)
require.NoError(t, e)
e = o.Validate(c, f, args)
require.Equal(t, nil, e)
require.NoError(t, e)
Run(o)
e = c.Execute()
require.Equal(t, nil, e)
require.NoError(t, e)
if os.Getenv(cmdtest.CaptureFlag) == "1" {
return

View File

@@ -680,6 +680,7 @@ func describeDataMovement(d *Describer, details bool, info *volume.BackupVolumeI
}
d.Printf("\t\t\t\tData Mover: %s\n", dataMover)
d.Printf("\t\t\t\tUploader Type: %s\n", info.SnapshotDataMovementInfo.UploaderType)
d.Printf("\t\t\t\tMoved data Size (bytes): %d\n", info.SnapshotDataMovementInfo.Size)
} else {
d.Printf("\t\t\tData Movement: %s\n", "included, specify --details for more information")
}

View File

@@ -502,6 +502,7 @@ func TestCSISnapshots(t *testing.T) {
Operation ID: fake-operation-4
Data Mover: velero
Uploader Type: fake-uploader
Moved data Size (bytes): 0
`,
},
{
@@ -516,6 +517,7 @@ func TestCSISnapshots(t *testing.T) {
UploaderType: "fake-uploader",
SnapshotHandle: "fake-repo-id-5",
OperationID: "fake-operation-5",
Size: 100,
},
},
},
@@ -526,6 +528,7 @@ func TestCSISnapshots(t *testing.T) {
Operation ID: fake-operation-5
Data Mover: velero
Uploader Type: fake-uploader
Moved data Size (bytes): 100
`,
},
}

View File

@@ -101,7 +101,7 @@ func TestAsyncBackup(t *testing.T) {
fs.callbacks = test.callbacks
err := fs.StartBackup(AccessPoint{ByPath: test.path}, "", "", false, nil, map[string]string{})
require.Equal(t, nil, err)
require.NoError(t, err)
<-finish
@@ -184,7 +184,7 @@ func TestAsyncRestore(t *testing.T) {
fs.callbacks = test.callbacks
err := fs.StartRestore(test.snapshot, AccessPoint{ByPath: test.path}, map[string]string{})
require.Equal(t, nil, err)
require.NoError(t, err)
<-finish

View File

@@ -36,7 +36,7 @@ func TestManager(t *testing.T) {
assert.Equal(t, ConcurrentLimitExceed, err)
ret := m.GetAsyncBR("job-0")
assert.Equal(t, nil, ret)
assert.Nil(t, ret)
ret = m.GetAsyncBR("job-1")
assert.Equal(t, async_job_1, ret)
@@ -48,5 +48,5 @@ func TestManager(t *testing.T) {
assert.Len(t, m.tracker, 1)
ret = m.GetAsyncBR("job-1")
assert.Equal(t, nil, ret)
assert.Nil(t, ret)
}