Retry completion status patch for backup and restore resources
Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>

update to design #8063

Signed-off-by: Tiger Kaovilai <tkaovila@redhat.com>
changelogs/unreleased/8068-kaovilai (new file, 1 line)
@@ -0,0 +1 @@
add retries with timeout to existing patch calls that move a backup/restore from InProgress/Finalizing to a final status phase.
@@ -37,6 +37,10 @@ and from above non final phases to
- Completed
- PartiallyFailed

Once a backup/restore is in one of the following phases, it is already reconciled periodically and does not need an additional retry:

- WaitingForPluginOperations
- WaitingForPluginOperationsPartiallyFailed
## Detailed Design
Relevant reconcilers will have `resourceTimeout time.Duration` added to their structs and to the parameters of the corresponding New[Backup|Restore]XReconciler functions.
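For illustration, a minimal sketch of that change, using the backup finalizer reconciler as the example (elided fields and parameters stand in for the existing ones; the exact shapes appear in the diffs below):

```go
// Sketch only: the reconciler carries the timeout used to bound
// retries of its completion-status patches.
type backupFinalizerReconciler struct {
	// ...existing fields elided...
	resourceTimeout time.Duration
}

func NewBackupFinalizerReconciler(
	// ...existing parameters elided...
	resourceTimeout time.Duration,
) *backupFinalizerReconciler {
	return &backupFinalizerReconciler{
		// ...existing fields elided...
		resourceTimeout: resourceTimeout,
	}
}
```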
@@ -18,8 +18,11 @@ package client
import (
	"context"
	"math"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/retry"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
)
@@ -36,3 +39,27 @@ func CreateRetryGenerateName(client kbclient.Client, ctx context.Context, obj kb
		return client.Create(ctx, obj, &kbclient.CreateOptions{})
	}
}

// CapBackoff provides a backoff with a set backoff cap
func CapBackoff(cap time.Duration) wait.Backoff {
	if cap < 0 {
		cap = 0
	}
	return wait.Backoff{
		Steps:    math.MaxInt,
		Duration: 10 * time.Millisecond,
		Cap:      cap,
		Factor:   retry.DefaultBackoff.Factor,
		Jitter:   retry.DefaultBackoff.Jitter,
	}
}

// RetryOnRetriableMaxBackOff accepts a patch function param, retrying when the provided retriable function returns true.
func RetryOnRetriableMaxBackOff(maxDuration time.Duration, fn func() error, retriable func(error) bool) error {
	return retry.OnError(CapBackoff(maxDuration), func(err error) bool { return retriable(err) }, fn)
}

// RetryOnErrorMaxBackOff accepts a patch function param, retrying when the error is not nil.
func RetryOnErrorMaxBackOff(maxDuration time.Duration, fn func() error) error {
	return RetryOnRetriableMaxBackOff(maxDuration, fn, func(err error) bool { return err != nil })
}
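A minimal usage sketch of these helpers (caller-side names such as kbClient, backup, and original are illustrative, not part of the diff):

	// Retry the patch, backing off exponentially from 10ms with delays
	// capped at resourceTimeout, per wait.Backoff semantics.
	err := client.RetryOnErrorMaxBackOff(resourceTimeout, func() error {
		return kbClient.Patch(ctx, backup, kbclient.MergeFrom(original))
	})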
@@ -689,6 +689,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
		backupStoreGetter,
		s.logger,
		s.metrics,
		s.config.ResourceTimeout,
	)
	if err := r.SetupWithManager(s.mgr); err != nil {
		s.logger.Fatal(err, "unable to create controller", "controller", constant.ControllerBackupFinalizer)
@@ -814,6 +815,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
		s.config.DefaultItemOperationTimeout,
		s.config.DisableInformerCache,
		s.crClient,
		s.config.ResourceTimeout,
	)

	if err = r.SetupWithManager(s.mgr); err != nil {
@@ -247,7 +247,10 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
		request.Status.StartTimestamp = &metav1.Time{Time: b.clock.Now()}
	}

-	// update status
+	// update status to
+	// BackupPhaseFailedValidation
+	// BackupPhaseInProgress
+	// if patch fails, backup can reconcile again as phase would still be "" or New
	if err := kubeutil.PatchResource(original, request.Backup, b.kbClient); err != nil {
		return ctrl.Result{}, errors.Wrapf(err, "error updating Backup status to %s", request.Status.Phase)
	}
@@ -304,9 +307,16 @@ func (b *backupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctr
		b.metrics.RegisterBackupValidationFailure(backupScheduleName)
		b.metrics.RegisterBackupLastStatus(backupScheduleName, metrics.BackupLastStatusFailure)
	}
-	log.Info("Updating backup's final status")
-	if err := kubeutil.PatchResource(original, request.Backup, b.kbClient); err != nil {
-		log.WithError(err).Error("error updating backup's final status")
+	log.Info("Updating backup's status")
+	// Phases were updated in runBackup()
+	// This patch with retry updates Phase from InProgress to
+	// BackupPhaseWaitingForPluginOperations -> backup_operations_controller.go will now reconcile
+	// BackupPhaseWaitingForPluginOperationsPartiallyFailed -> backup_operations_controller.go will now reconcile
+	// BackupPhaseFinalizing -> backup_finalizer_controller.go will now reconcile
+	// BackupPhaseFinalizingPartiallyFailed -> backup_finalizer_controller.go will now reconcile
+	// BackupPhaseFailed
+	if err := kubeutil.PatchResourceWithRetriesOnErrors(b.resourceTimeout, original, request.Backup, b.kbClient); err != nil {
+		log.WithError(err).Errorf("error updating backup's status from %v to %v", original.Status.Phase, request.Backup.Status.Phase)
	}
	return ctrl.Result{}, nil
}
@@ -20,6 +20,7 @@ import (
	"bytes"
	"context"
	"os"
	"time"

	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
@@ -31,6 +32,7 @@ import (

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
	pkgbackup "github.com/vmware-tanzu/velero/pkg/backup"
	"github.com/vmware-tanzu/velero/pkg/client"
	"github.com/vmware-tanzu/velero/pkg/itemoperation"
	"github.com/vmware-tanzu/velero/pkg/kuberesource"
	"github.com/vmware-tanzu/velero/pkg/metrics"
@@ -51,6 +53,7 @@ type backupFinalizerReconciler struct {
	metrics           *metrics.ServerMetrics
	backupStoreGetter persistence.ObjectBackupStoreGetter
	log               logrus.FieldLogger
	resourceTimeout   time.Duration
}

// NewBackupFinalizerReconciler initializes and returns backupFinalizerReconciler struct.
@@ -64,6 +67,7 @@ func NewBackupFinalizerReconciler(
	backupStoreGetter persistence.ObjectBackupStoreGetter,
	log logrus.FieldLogger,
	metrics *metrics.ServerMetrics,
	resourceTimeout time.Duration,
) *backupFinalizerReconciler {
	return &backupFinalizerReconciler{
		client: client,
@@ -119,7 +123,11 @@ func (r *backupFinalizerReconciler) Reconcile(ctx context.Context, req ctrl.Requ
		r.backupTracker.Delete(backup.Namespace, backup.Name)
	}
	// Always attempt to Patch the backup object and status after each reconciliation.
-	if err := r.client.Patch(ctx, backup, kbclient.MergeFrom(original)); err != nil {
+	//
+	// if this patch fails, there may not be another opportunity to update the backup object without an external update event.
+	// so we retry
+	// This retries updating Finalizing/FinalizingPartiallyFailed to Completed/PartiallyFailed
+	if err := client.RetryOnErrorMaxBackOff(r.resourceTimeout, func() error { return r.client.Patch(ctx, backup, kbclient.MergeFrom(original)) }); err != nil {
		log.WithError(err).Error("Error updating backup")
		return
	}
@@ -58,6 +58,7 @@ func mockBackupFinalizerReconciler(fakeClient kbclient.Client, fakeGlobalClient
		NewFakeSingleObjectBackupStoreGetter(backupStore),
		logrus.StandardLogger(),
		metrics.NewServerMetrics(),
		10*time.Minute,
	), backupper
}
func TestBackupFinalizerReconcile(t *testing.T) {
@@ -109,6 +109,7 @@ type restoreReconciler struct {
	newPluginManager  func(logger logrus.FieldLogger) clientmgmt.Manager
	backupStoreGetter persistence.ObjectBackupStoreGetter
	globalCrClient    client.Client
	resourceTimeout   time.Duration
}

type backupInfo struct {
@@ -130,6 +131,7 @@ func NewRestoreReconciler(
	defaultItemOperationTimeout time.Duration,
	disableInformerCache bool,
	globalCrClient client.Client,
	resourceTimeout time.Duration,
) *restoreReconciler {
	r := &restoreReconciler{
		ctx: ctx,
@@ -149,7 +151,8 @@ func NewRestoreReconciler(
		newPluginManager:  newPluginManager,
		backupStoreGetter: backupStoreGetter,

		globalCrClient:  globalCrClient,
		resourceTimeout: resourceTimeout,
	}

	// Move the periodical backup and restore metrics computing logic from controllers to here.
@@ -246,6 +249,7 @@ func (r *restoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
	}

	// patch to update status and persist to API
	// This is patching from "" or New, no retry needed
	err = kubeutil.PatchResource(original, restore, r.kbClient)
	if err != nil {
		// return the error so the restore can be re-processed; it's currently
@@ -274,10 +278,15 @@ func (r *restoreReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ct
		restore.Status.Phase == api.RestorePhaseCompleted {
		restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()}
	}
-	log.Debug("Updating restore's final status")
-
-	if err = kubeutil.PatchResource(original, restore, r.kbClient); err != nil {
-		log.WithError(errors.WithStack(err)).Info("Error updating restore's final status")
+	log.Debug("Updating restore's status")
+	// Phases were updated in runValidatedRestore
+	// This patch with retry updates Phase from InProgress to
+	// WaitingForPluginOperations
+	// WaitingForPluginOperationsPartiallyFailed
+	// Finalizing
+	// FinalizingPartiallyFailed
+	if err = kubeutil.PatchResourceWithRetriesOnErrors(r.resourceTimeout, original, restore, r.kbClient); err != nil {
+		log.WithError(errors.WithStack(err)).Infof("Error updating restore's status from %v to %v", original.Status.Phase, restore.Status.Phase)
		// No need to re-enqueue here, because restore's already set to InProgress before.
		// Controller only handles New restores.
	}
@@ -116,6 +116,7 @@ func TestFetchBackupInfo(t *testing.T) {
		60*time.Minute,
		false,
		fakeGlobalClient,
		10*time.Minute,
	)

	if test.backupStoreError == nil {
@@ -196,6 +197,7 @@ func TestProcessQueueItemSkips(t *testing.T) {
		60*time.Minute,
		false,
		fakeGlobalClient,
		10*time.Minute,
	)

	_, err := r.Reconcile(context.Background(), ctrl.Request{NamespacedName: types.NamespacedName{
@@ -498,6 +500,7 @@ func TestRestoreReconcile(t *testing.T) {
		60*time.Minute,
		false,
		fakeGlobalClient,
		10*time.Minute,
	)

	r.clock = clocktesting.NewFakeClock(now)
@@ -681,6 +684,7 @@ func TestValidateAndCompleteWhenScheduleNameSpecified(t *testing.T) {
		60*time.Minute,
		false,
		fakeGlobalClient,
		10*time.Minute,
	)

	restore := &velerov1api.Restore{
@@ -776,6 +780,7 @@ func TestValidateAndCompleteWithResourceModifierSpecified(t *testing.T) {
		60*time.Minute,
		false,
		fakeGlobalClient,
		10*time.Minute,
	)

	restore := &velerov1api.Restore{
@@ -233,8 +233,10 @@ func (r *restoreFinalizerReconciler) finishProcessing(restorePhase velerov1api.R
		r.metrics.RegisterRestoreSuccess(restore.Spec.ScheduleName)
	}
	restore.Status.CompletionTimestamp = &metav1.Time{Time: r.clock.Now()}
-
-	return kubeutil.PatchResource(original, restore, r.Client)
+	// retry `Finalizing`/`FinalizingPartiallyFailed` to
+	// - `Completed`
+	// - `PartiallyFailed`
+	return kubeutil.PatchResourceWithRetriesOnErrors(r.resourceTimeout, original, restore, r.Client)
}

// finalizerContext includes all the dependencies required by finalization tasks and
@@ -19,20 +19,19 @@ package controller
import (
	"context"
	"fmt"
	"syscall"
	"testing"
	"time"

	"github.com/sirupsen/logrus"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
	corev1api "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	testclocks "k8s.io/utils/clock/testing"
	ctrl "sigs.k8s.io/controller-runtime"
	crclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/internal/hook"
@@ -44,6 +43,7 @@ import (
	"github.com/vmware-tanzu/velero/pkg/plugin/clientmgmt"
	pluginmocks "github.com/vmware-tanzu/velero/pkg/plugin/mocks"
	velerotest "github.com/vmware-tanzu/velero/pkg/test"
	pkgUtilKubeMocks "github.com/vmware-tanzu/velero/pkg/util/kube/mocks"
	"github.com/vmware-tanzu/velero/pkg/util/results"
)
@@ -556,3 +556,74 @@ func TestWaitRestoreExecHook(t *testing.T) {
		assert.Equal(t, tc.expectedHooksFailed, updated.Status.HookStatus.HooksFailed)
	}
}

// Test finishProcessing with mocks of the kube client to simulate connection refused errors.
func Test_restoreFinalizerReconciler_finishProcessing(t *testing.T) {
	type args struct {
		// mockClientActions simulates different client errors
		mockClientActions func(*pkgUtilKubeMocks.Client)
		// mockClientAsserts returns a bool indicating whether the client methods were called as expected
		mockClientAsserts func(*pkgUtilKubeMocks.Client) bool
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "restore failed to patch status, should retry on connection refused",
			args: args{
				mockClientActions: func(client *pkgUtilKubeMocks.Client) {
					client.On("Patch", mock.Anything, mock.Anything, mock.Anything).Return(syscall.ECONNREFUSED).Once()
					client.On("Patch", mock.Anything, mock.Anything, mock.Anything).Return(nil)
				},
				mockClientAsserts: func(client *pkgUtilKubeMocks.Client) bool {
					return client.AssertNumberOfCalls(t, "Patch", 2)
				},
			},
		},
		{
			name: "restore failed to patch status, retry on connection refused until max retries",
			args: args{
				mockClientActions: func(client *pkgUtilKubeMocks.Client) {
					client.On("Patch", mock.Anything, mock.Anything, mock.Anything).Return(syscall.ECONNREFUSED)
				},
				mockClientAsserts: func(client *pkgUtilKubeMocks.Client) bool {
					return len(client.Calls) > 2
				},
			},
			wantErr: true,
		},
		{
			name: "restore patch status ok, should not retry",
			args: args{
				mockClientActions: func(client *pkgUtilKubeMocks.Client) {
					client.On("Patch", mock.Anything, mock.Anything, mock.Anything).Return(nil)
				},
				mockClientAsserts: func(client *pkgUtilKubeMocks.Client) bool {
					return client.AssertNumberOfCalls(t, "Patch", 1)
				},
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			client := pkgUtilKubeMocks.NewClient(t)
			// mock client actions
			tt.args.mockClientActions(client)
			r := &restoreFinalizerReconciler{
				Client:          client,
				metrics:         metrics.NewServerMetrics(),
				clock:           testclocks.NewFakeClock(time.Now()),
				resourceTimeout: 1 * time.Second,
			}
			restore := builder.ForRestore(velerov1api.DefaultNamespace, "restoreName").Result()
			if err := r.finishProcessing(velerov1api.RestorePhaseInProgress, restore, restore); (err != nil) != tt.wantErr {
				t.Errorf("restoreFinalizerReconciler.finishProcessing() error = %v, wantErr %v", err, tt.wantErr)
			}
			if !tt.args.mockClientAsserts(client) {
				t.Errorf("mockClientAsserts() failed")
			}
		})
	}
}
@@ -18,8 +18,11 @@ package kube
import (
	"context"
	"time"

	"sigs.k8s.io/controller-runtime/pkg/client"

	veleroPkgClient "github.com/vmware-tanzu/velero/pkg/client"
)

func PatchResource(original, updated client.Object, kbClient client.Client) error {
@@ -27,3 +30,19 @@ func PatchResource(original, updated client.Object, kbClient client.Client) erro
	return err
}

// PatchResourceWithRetries patches the original resource with the updated resource, retrying when the provided retriable function returns true.
func PatchResourceWithRetries(maxDuration time.Duration, original, updated client.Object, kbClient client.Client, retriable func(error) bool) error {
	return veleroPkgClient.RetryOnRetriableMaxBackOff(maxDuration, func() error { return PatchResource(original, updated, kbClient) }, retriable)
}

// PatchResourceWithRetriesOnErrors patches the original resource with the updated resource, retrying when the operation returns an error.
func PatchResourceWithRetriesOnErrors(maxDuration time.Duration, original, updated client.Object, kbClient client.Client) error {
	return PatchResourceWithRetries(maxDuration, original, updated, kbClient, func(err error) bool {
		// retry using DefaultBackoff to resolve connection refused errors that may occur when the server is under heavy load
		// TODO: consider using a more specific error type to retry; for now, we retry on all errors
		// specific errors:
		// - connection refused: https://pkg.go.dev/syscall#:~:text=Errno(0x67)-,ECONNREFUSED,-%3D%20Errno(0x6f
		return err != nil
	})
}
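The TODO above leaves room for a narrower retry predicate. A hedged sketch of what one could look like with the PatchResourceWithRetries variant (retryOnConnRefused is hypothetical, not part of this commit; it uses the standard library "errors" and "syscall" packages):

	// Hypothetical predicate: retry only when the error chain contains
	// ECONNREFUSED, letting other errors fail fast.
	func retryOnConnRefused(err error) bool {
		return errors.Is(err, syscall.ECONNREFUSED)
	}

	// err := PatchResourceWithRetries(maxDuration, original, updated, kbClient, retryOnConnRefused)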
pkg/util/kube/mocks.go (new file, 26 lines)
@@ -0,0 +1,26 @@
package kube

import (
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

// Client knows how to perform CRUD operations on Kubernetes objects.
//go:generate mockery --name=Client
type Client interface {
	client.Reader
	client.Writer
	client.StatusClient
	client.SubResourceClientConstructor

	// Scheme returns the scheme this client is using.
	Scheme() *runtime.Scheme
	// RESTMapper returns the REST mapper this client is using.
	RESTMapper() meta.RESTMapper
	// GroupVersionKindFor returns the GroupVersionKind for the given object.
	GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error)
	// IsObjectNamespaced returns true if the GroupVersionKind of the object is namespaced.
	IsObjectNamespaced(obj runtime.Object) (bool, error)
}
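An illustrative compile-time check (not part of the commit): because the generated mock below implements every method listed here, it satisfies this interface, which a test could assert with

	var _ kube.Client = (*pkgUtilKubeMocks.Client)(nil)

where pkgUtilKubeMocks imports "github.com/vmware-tanzu/velero/pkg/util/kube/mocks" exactly as the test file above does.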
pkg/util/kube/mocks/Client.go (new file, 349 lines)
@@ -0,0 +1,349 @@
// Code generated by mockery v2.42.1. DO NOT EDIT.

package mocks

import (
	context "context"

	client "sigs.k8s.io/controller-runtime/pkg/client"

	meta "k8s.io/apimachinery/pkg/api/meta"

	mock "github.com/stretchr/testify/mock"

	runtime "k8s.io/apimachinery/pkg/runtime"

	schema "k8s.io/apimachinery/pkg/runtime/schema"

	types "k8s.io/apimachinery/pkg/types"
)

// Client is an autogenerated mock type for the Client type
type Client struct {
	mock.Mock
}

// Create provides a mock function with given fields: ctx, obj, opts
func (_m *Client) Create(ctx context.Context, obj client.Object, opts ...client.CreateOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, obj)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for Create")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.CreateOption) error); ok {
		r0 = rf(ctx, obj, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Delete provides a mock function with given fields: ctx, obj, opts
func (_m *Client) Delete(ctx context.Context, obj client.Object, opts ...client.DeleteOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, obj)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for Delete")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.DeleteOption) error); ok {
		r0 = rf(ctx, obj, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// DeleteAllOf provides a mock function with given fields: ctx, obj, opts
func (_m *Client) DeleteAllOf(ctx context.Context, obj client.Object, opts ...client.DeleteAllOfOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, obj)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for DeleteAllOf")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.DeleteAllOfOption) error); ok {
		r0 = rf(ctx, obj, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Get provides a mock function with given fields: ctx, key, obj, opts
func (_m *Client) Get(ctx context.Context, key types.NamespacedName, obj client.Object, opts ...client.GetOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, key, obj)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for Get")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, types.NamespacedName, client.Object, ...client.GetOption) error); ok {
		r0 = rf(ctx, key, obj, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// GroupVersionKindFor provides a mock function with given fields: obj
func (_m *Client) GroupVersionKindFor(obj runtime.Object) (schema.GroupVersionKind, error) {
	ret := _m.Called(obj)

	if len(ret) == 0 {
		panic("no return value specified for GroupVersionKindFor")
	}

	var r0 schema.GroupVersionKind
	var r1 error
	if rf, ok := ret.Get(0).(func(runtime.Object) (schema.GroupVersionKind, error)); ok {
		return rf(obj)
	}
	if rf, ok := ret.Get(0).(func(runtime.Object) schema.GroupVersionKind); ok {
		r0 = rf(obj)
	} else {
		r0 = ret.Get(0).(schema.GroupVersionKind)
	}

	if rf, ok := ret.Get(1).(func(runtime.Object) error); ok {
		r1 = rf(obj)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// IsObjectNamespaced provides a mock function with given fields: obj
func (_m *Client) IsObjectNamespaced(obj runtime.Object) (bool, error) {
	ret := _m.Called(obj)

	if len(ret) == 0 {
		panic("no return value specified for IsObjectNamespaced")
	}

	var r0 bool
	var r1 error
	if rf, ok := ret.Get(0).(func(runtime.Object) (bool, error)); ok {
		return rf(obj)
	}
	if rf, ok := ret.Get(0).(func(runtime.Object) bool); ok {
		r0 = rf(obj)
	} else {
		r0 = ret.Get(0).(bool)
	}

	if rf, ok := ret.Get(1).(func(runtime.Object) error); ok {
		r1 = rf(obj)
	} else {
		r1 = ret.Error(1)
	}

	return r0, r1
}

// List provides a mock function with given fields: ctx, list, opts
func (_m *Client) List(ctx context.Context, list client.ObjectList, opts ...client.ListOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, list)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for List")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.ObjectList, ...client.ListOption) error); ok {
		r0 = rf(ctx, list, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// Patch provides a mock function with given fields: ctx, obj, patch, opts
func (_m *Client) Patch(ctx context.Context, obj client.Object, patch client.Patch, opts ...client.PatchOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, obj, patch)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for Patch")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.Object, client.Patch, ...client.PatchOption) error); ok {
		r0 = rf(ctx, obj, patch, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// RESTMapper provides a mock function with given fields:
func (_m *Client) RESTMapper() meta.RESTMapper {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for RESTMapper")
	}

	var r0 meta.RESTMapper
	if rf, ok := ret.Get(0).(func() meta.RESTMapper); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(meta.RESTMapper)
		}
	}

	return r0
}

// Scheme provides a mock function with given fields:
func (_m *Client) Scheme() *runtime.Scheme {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Scheme")
	}

	var r0 *runtime.Scheme
	if rf, ok := ret.Get(0).(func() *runtime.Scheme); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(*runtime.Scheme)
		}
	}

	return r0
}

// Status provides a mock function with given fields:
func (_m *Client) Status() client.SubResourceWriter {
	ret := _m.Called()

	if len(ret) == 0 {
		panic("no return value specified for Status")
	}

	var r0 client.SubResourceWriter
	if rf, ok := ret.Get(0).(func() client.SubResourceWriter); ok {
		r0 = rf()
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(client.SubResourceWriter)
		}
	}

	return r0
}

// SubResource provides a mock function with given fields: subResource
func (_m *Client) SubResource(subResource string) client.SubResourceClient {
	ret := _m.Called(subResource)

	if len(ret) == 0 {
		panic("no return value specified for SubResource")
	}

	var r0 client.SubResourceClient
	if rf, ok := ret.Get(0).(func(string) client.SubResourceClient); ok {
		r0 = rf(subResource)
	} else {
		if ret.Get(0) != nil {
			r0 = ret.Get(0).(client.SubResourceClient)
		}
	}

	return r0
}

// Update provides a mock function with given fields: ctx, obj, opts
func (_m *Client) Update(ctx context.Context, obj client.Object, opts ...client.UpdateOption) error {
	_va := make([]interface{}, len(opts))
	for _i := range opts {
		_va[_i] = opts[_i]
	}
	var _ca []interface{}
	_ca = append(_ca, ctx, obj)
	_ca = append(_ca, _va...)
	ret := _m.Called(_ca...)

	if len(ret) == 0 {
		panic("no return value specified for Update")
	}

	var r0 error
	if rf, ok := ret.Get(0).(func(context.Context, client.Object, ...client.UpdateOption) error); ok {
		r0 = rf(ctx, obj, opts...)
	} else {
		r0 = ret.Error(0)
	}

	return r0
}

// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
// The first argument is typically a *testing.T value.
func NewClient(t interface {
	mock.TestingT
	Cleanup(func())
}) *Client {
	mock := &Client{}
	mock.Mock.Test(t)

	t.Cleanup(func() { mock.AssertExpectations(t) })

	return mock
}