Mirror of https://github.com/vmware-tanzu/velero.git (synced 2026-01-08 14:21:18 +00:00)
Merge pull request #4514 from ywk253100/2220110_nodeport
Check the existence of the expected service when ignoring the NodePort already allocated error
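In short: when restoring a Service fails only with "provided port is already allocated" causes, Velero now also verifies that the Service being restored actually exists before treating the failure as an ordinary "already exists" conflict. A condensed, illustrative sketch of that decision follows (the real change is the isAlreadyExistsError update in the diff below; the function name and the serviceExists/lookupErr parameters here merely stand in for the resourceClient.Get call):

// decideNodePortConflict is an illustrative stand-in for the new check in
// isAlreadyExistsError: the port-allocation error is ignored only when the
// Service being restored is already present in the cluster.
func decideNodePortConflict(serviceExists bool, lookupErr error) (ignoreAsAlreadyExists bool, err error) {
	if lookupErr != nil {
		// The existence check itself failed; surface that error instead of guessing.
		return false, lookupErr
	}
	if !serviceExists {
		// The node ports are held by some other Service, so this is a real
		// restore failure rather than an "already exists" situation.
		return false, nil
	}
	// The expected Service exists; fall through to the normal already-exists handling.
	return true, nil
}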
pkg/builder/service_builder.go (new file, 57 lines)
@@ -0,0 +1,57 @@
+/*
+Copyright the Velero contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package builder
+
+import (
+	corev1api "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// ServiceBuilder builds Service objects.
+type ServiceBuilder struct {
+	object *corev1api.Service
+}
+
+// ForService is the constructor for a ServiceBuilder.
+func ForService(ns, name string) *ServiceBuilder {
+	return &ServiceBuilder{
+		object: &corev1api.Service{
+			TypeMeta: metav1.TypeMeta{
+				APIVersion: corev1api.SchemeGroupVersion.String(),
+				Kind:       "Service",
+			},
+			ObjectMeta: metav1.ObjectMeta{
+				Namespace: ns,
+				Name:      name,
+			},
+		},
+	}
+}
+
+// Result returns the built Service.
+func (s *ServiceBuilder) Result() *corev1api.Service {
+	return s.object
+}
+
+// ObjectMeta applies functional options to the Service's ObjectMeta.
+func (s *ServiceBuilder) ObjectMeta(opts ...ObjectMetaOpt) *ServiceBuilder {
+	for _, opt := range opts {
+		opt(s.object)
+	}
+
+	return s
+}
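The builder follows the same pattern as Velero's other test builders; the restore test added later in this PR uses it to register a fake Service. A minimal usage sketch (WithLabels is assumed to be the builder package's existing ObjectMetaOpt helper; names and values are illustrative):

// Build a Service fixture in namespace "default" named "test".
svc := builder.ForService("default", "test").Result()

// Options can be chained through ObjectMeta, for example to attach labels.
labeled := builder.ForService("default", "test").
	ObjectMeta(builder.WithLabels("app", "demo")).
	Result()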
@@ -1224,7 +1224,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 
 	ctx.log.Infof("Attempting to restore %s: %v", obj.GroupVersionKind().Kind, name)
 	createdObj, restoreErr := resourceClient.Create(obj)
-	if isAlreadyExistsError(ctx, obj, restoreErr) {
+	isAlreadyExistsError, err := isAlreadyExistsError(ctx, obj, restoreErr, resourceClient)
+	if err != nil {
+		errs.Add(namespace, err)
+		return warnings, errs
+	}
+	if isAlreadyExistsError {
 		fromCluster, err := resourceClient.Get(name, metav1.GetOptions{})
 		if err != nil {
 			ctx.log.Infof("Error retrieving cluster version of %s: %v", kube.NamespaceAndName(obj), err)
@@ -1274,7 +1279,8 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 				ctx.log.Infof("ServiceAccount %s successfully updated", kube.NamespaceAndName(obj))
 			}
 		default:
-			e := errors.Errorf("could not restore, %s. Warning: the in-cluster version is different than the backed-up version.", restoreErr)
+			e := errors.Errorf("could not restore, %s %q already exists. Warning: the in-cluster version is different than the backed-up version.",
+				obj.GetKind(), obj.GetName())
 			warnings.Add(namespace, e)
 		}
 		return warnings, errs
@@ -1321,12 +1327,12 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
 	return warnings, errs
 }
 
-func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error) bool {
+func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, err error, client client.Dynamic) (bool, error) {
 	if err == nil {
-		return false
+		return false, nil
 	}
 	if apierrors.IsAlreadyExists(err) {
-		return true
+		return true, nil
 	}
 	// The "invalid value error" or "internal error" rather than "already exists" error returns when restoring nodePort service in the following two cases:
 	// 1. For NodePort service, the service has nodePort preservation while the same nodePort service already exists. - Get invalid value error
@@ -1334,24 +1340,30 @@ func isAlreadyExistsError(ctx *restoreContext, obj *unstructured.Unstructured, e
 	// If this is the case, the function returns true to avoid reporting error.
 	// Refer to https://github.com/vmware-tanzu/velero/issues/2308 for more details
 	if obj.GetKind() != "Service" {
-		return false
+		return false, nil
 	}
 	statusErr, ok := err.(*apierrors.StatusError)
 	if !ok || statusErr.Status().Details == nil || len(statusErr.Status().Details.Causes) == 0 {
-		return false
+		return false, nil
 	}
 	// make sure all the causes are "port allocated" error
-	isAllocatedErr := true
 	for _, cause := range statusErr.Status().Details.Causes {
 		if !strings.Contains(cause.Message, "provided port is already allocated") {
-			isAllocatedErr = false
-			break
+			return false, nil
 		}
 	}
-	if isAllocatedErr {
-		ctx.log.Infof("ignore the provided port is already allocated error for service %s", kube.NamespaceAndName(obj))
+
+	// the "already allocated" error may caused by other services, check whether the expected service exists or not
+	if _, err = client.Get(obj.GetName(), metav1.GetOptions{}); err != nil {
+		if apierrors.IsNotFound(err) {
+			ctx.log.Debugf("Service %s not found", kube.NamespaceAndName(obj))
+			return false, nil
+		}
+		return false, errors.Wrapf(err, "Unable to get the service %s while checking the NodePort is already allocated error", kube.NamespaceAndName(obj))
 	}
-	return isAllocatedErr
+
+	ctx.log.Infof("Service %s exists, ignore the provided port is already allocated error", kube.NamespaceAndName(obj))
+	return true, nil
 }
 
 // shouldRenamePV returns a boolean indicating whether a persistent volume should
@@ -3024,10 +3024,11 @@ func Test_resetVolumeBindingInfo(t *testing.T) {
 
 func TestIsAlreadyExistsError(t *testing.T) {
 	tests := []struct {
-		name     string
-		obj      *unstructured.Unstructured
-		err      error
-		expected bool
+		name        string
+		apiResource *test.APIResource
+		obj         *unstructured.Unstructured
+		err         error
+		expected    bool
 	}{
 		{
 			name: "The input error is IsAlreadyExists error",
@@ -3078,10 +3079,40 @@ func TestIsAlreadyExistsError(t *testing.T) {
 			expected: false,
 		},
 		{
-			name: "The causes contains only port already allocated error",
+			name: "Get already allocated error but the service doesn't exist",
+			obj: &unstructured.Unstructured{
+				Object: map[string]interface{}{
+					"kind": "Service",
+					"metadata": map[string]interface{}{
+						"namespace": "default",
+						"name":      "test",
+					},
+				},
+			},
+			err: &apierrors.StatusError{
+				ErrStatus: metav1.Status{
+					Reason: metav1.StatusReasonInvalid,
+					Details: &metav1.StatusDetails{
+						Causes: []metav1.StatusCause{
+							{Message: "provided port is already allocated"},
+						},
+					},
+				},
+			},
+			expected: false,
+		},
+		{
+			name: "Get already allocated error and the service exists",
+			apiResource: test.Services(
+				builder.ForService("default", "test").Result(),
+			),
 			obj: &unstructured.Unstructured{
 				Object: map[string]interface{}{
 					"kind": "Service",
+					"metadata": map[string]interface{}{
+						"namespace": "default",
+						"name":      "test",
+					},
 				},
 			},
 			err: &apierrors.StatusError{
@@ -3098,10 +3129,30 @@ func TestIsAlreadyExistsError(t *testing.T) {
 		},
 	}
 	for _, test := range tests {
+		h := newHarness(t)
+
+		ctx := &restoreContext{
+			log:             h.log,
+			dynamicFactory:  client.NewDynamicFactory(h.DynamicClient),
+			namespaceClient: h.KubeClient.CoreV1().Namespaces(),
+		}
+
+		if test.apiResource != nil {
+			h.AddItems(t, test.apiResource)
+		}
+
+		client, err := ctx.dynamicFactory.ClientForGroupVersionResource(
+			schema.GroupVersion{Group: "", Version: "v1"},
+			metav1.APIResource{Name: "services"},
+			"default",
+		)
+		require.NoError(t, err)
+
 		t.Run(test.name, func(t *testing.T) {
-			assert.Equal(t, test.expected, isAlreadyExistsError(&restoreContext{
-				log: logrus.StandardLogger(),
-			}, test.obj, test.err))
+			result, err := isAlreadyExistsError(ctx, test.obj, test.err, client)
+			require.NoError(t, err)
+
+			assert.Equal(t, test.expected, result)
 		})
 	}
 }
@@ -162,3 +162,14 @@ func VSLs(items ...metav1.Object) *APIResource {
 		Items: items,
 	}
 }
+
+func Services(items ...metav1.Object) *APIResource {
+	return &APIResource{
+		Group:      "",
+		Version:    "v1",
+		Name:       "services",
+		ShortName:  "svc",
+		Namespaced: true,
+		Items:      items,
+	}
+}
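For reference, this helper is what the new restore test uses, together with the Service builder added above, to stand up an existing Service in the fake API server; a sketch mirroring that wiring:

// Register an existing Service "default/test" with the test harness so that
// the NodePort check's client.Get call can find it.
h := newHarness(t)
h.AddItems(t, test.Services(
	builder.ForService("default", "test").Result(),
))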