Ignore the `provided port is already allocated` error when restoring the `NodePort` service (#4336)

Fixes #2308

Signed-off-by: Wenkai Yin(尹文开) <yinw@vmware.com>
Wenkai Yin(尹文开)
2021-11-15 20:25:04 +08:00
committed by GitHub
parent 130602d723
commit 7c4e03e9f9
3 changed files with 129 additions and 1 deletion


@@ -0,0 +1 @@
+Ignore the `provided port is already allocated` error when restoring the `NodePort` service
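
For context on the failure mode this changelog entry describes, the sketch below is illustrative only and is not part of the commit; the names and the port 30080 are made up. A backed-up NodePort Service carries a concrete nodePort, and the restore preserves that value, so creating it in a cluster where the port is already allocated is rejected by the API server with an Invalid error rather than an AlreadyExists error.

package main

import (
	"fmt"

	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// nodePortService builds a hypothetical Service of the shape that triggers the
// issue: its nodePort is pinned, so a restore keeps the value instead of letting
// the API server pick a free port.
func nodePortService() *corev1api.Service {
	return &corev1api.Service{
		ObjectMeta: metav1.ObjectMeta{Name: "web", Namespace: "default"},
		Spec: corev1api.ServiceSpec{
			Type:     corev1api.ServiceTypeNodePort,
			Selector: map[string]string{"app": "web"},
			Ports: []corev1api.ServicePort{
				{Name: "http", Port: 80, NodePort: 30080}, // 30080 is an arbitrary example port
			},
		},
	}
}

func main() {
	svc := nodePortService()
	fmt.Printf("%s/%s pins nodePort %d\n", svc.Namespace, svc.Name, svc.Spec.Ports[0].NodePort)
}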


@@ -1238,7 +1238,7 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
 	createdObj, restoreErr := resourceClient.Create(obj)
-	if apierrors.IsAlreadyExists(restoreErr) {
+	if isAlreadyExistsError(ctx, obj, restoreErr) {
 		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
 		if err != nil {
 			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
@@ -1335,6 +1335,38 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	return warnings, errs
 }
 
+func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error) bool {
+	if err == nil {
+		return false
+	}
+	if apierrors.IsAlreadyExists(err) {
+		return true
+	}
+	// When restoring a NodePort service that preserves its nodePort, the API server
+	// returns an "invalid value" error instead of "already exists" if a service with
+	// the same nodePort already exists. Return true in that case so it is not
+	// reported as an error. See https://github.com/vmware-tanzu/velero/issues/2308.
+	if obj.GetKind() != "Service" || !apierrors.IsInvalid(err) {
+		return false
+	}
+	statusErr, ok := err.(*apierrors.StatusError)
+	if !ok || statusErr.Status().Details == nil || len(statusErr.Status().Details.Causes) == 0 {
+		return false
+	}
+	// make sure all the causes are "port is already allocated" errors
+	isAllocatedErr := true
+	for _, cause := range statusErr.Status().Details.Causes {
+		if !strings.Contains(cause.Message, "provided port is already allocated") {
+			isAllocatedErr = false
+			break
+		}
+	}
+	if isAllocatedErr {
+		ctx.log.Infof("ignore the provided port is already allocated error for service %s", kube.NamespaceAndName(obj))
+	}
+	return isAllocatedErr
+}
+
 // shouldRenamePV returns a boolean indicating whether a persistent volume should
 // be given a new name before being restored, or an error if this cannot be determined.
 // A persistent volume will be given a new name if and only if (a) a PV with the
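
For reference, the rejection the API server sends in this scenario can be approximated as below. This is a standalone sketch rather than code from the commit; the kind, name, field path, and port are illustrative, but the error is built with apierrors.NewInvalid so that, like the real one, it reports reason Invalid (not AlreadyExists) and carries "provided port is already allocated" in its causes, which is what the isAlreadyExistsError helper above keys on.

package main

import (
	"fmt"
	"strings"

	corev1api "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/validation/field"
)

func main() {
	// Approximate the API server's rejection of a Service whose pinned nodePort
	// (30080 here is an arbitrary example) is already taken by another service.
	err := apierrors.NewInvalid(
		corev1api.SchemeGroupVersion.WithKind("Service").GroupKind(),
		"web",
		field.ErrorList{
			field.Invalid(
				field.NewPath("spec", "ports").Index(0).Child("nodePort"),
				int32(30080),
				"provided port is already allocated",
			),
		},
	)

	// The old check in restoreItem misses this error because its reason is
	// Invalid, not AlreadyExists.
	fmt.Println(apierrors.IsAlreadyExists(err)) // false
	fmt.Println(apierrors.IsInvalid(err))       // true

	// Every cause carries the message the new helper looks for, so the restore
	// can treat the service as already existing instead of failing.
	for _, cause := range err.Status().Details.Causes {
		fmt.Println(strings.Contains(cause.Message, "provided port is already allocated")) // true
	}
}

Because the helper requires every cause to match, a service create that fails for a mix of reasons still surfaces as a restore error, which keeps the relaxation narrowly scoped to the nodePort conflict.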


@@ -30,6 +30,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	corev1api "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -3021,3 +3022,97 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 		})
 	}
 }
+
+func TestIsAlreadyExistsError(t *testing.T) {
+	tests := []struct {
+		name     string
+		obj      *unstructured.Unstructured
+		err      error
+		expected bool
+	}{
+		{
+			name:     "The input error is an AlreadyExists error",
+			err:      apierrors.NewAlreadyExists(schema.GroupResource{}, ""),
+			expected: true,
+		},
+		{
+			name: "The input obj isn't a Service",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Pod",
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "The input error isn't an Invalid error",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Service",
+				},
+			},
+			err:      apierrors.NewBadRequest(""),
+			expected: false,
+		},
+		{
+			name: "The StatusError contains no causes",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Service",
+				},
+			},
+			err: &apierrors.StatusError{
+				ErrStatus: metav1.Status{
+					Reason: metav1.StatusReasonInvalid,
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "The causes contain more than the port-already-allocated error",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Service",
+				},
+			},
+			err: &apierrors.StatusError{
+				ErrStatus: metav1.Status{
+					Reason: metav1.StatusReasonInvalid,
+					Details: &metav1.StatusDetails{
+						Causes: []metav1.StatusCause{
+							{Message: "provided port is already allocated"},
+							{Message: "other error"},
+						},
+					},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "The causes contain only the port-already-allocated error",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Service",
+				},
+			},
+			err: &apierrors.StatusError{
+				ErrStatus: metav1.Status{
+					Reason: metav1.StatusReasonInvalid,
+					Details: &metav1.StatusDetails{
+						Causes: []metav1.StatusCause{
+							{Message: "provided port is already allocated"},
+						},
+					},
+				},
+			},
+			expected: true,
+		},
+	}
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			assert.Equal(t, test.expected, isAlreadyExistsError(&restoreContext{
+				log: logrus.StandardLogger(),
+			}, test.obj, test.err))
+		})
+	}
+}