Convert manifests + BSL api client to kubebuilder (#2561)
* kubebuilder init - minimalist version
* Add back main.go, apparently kb needs it
* Tweak makefile to accommodate kubebuilder expectations
* Port BSL to kubebuilder api client
* s/cache/client bc client fetches from cache
  And other naming improvements
* So, .GetAPIReader is how we bypass the cache
  In this case, the cache hasn't started yet
* Oh that's what this code was for... adding back
  We still need to embed the CRDs as binary data in the Velero binary to access the generated CRDs at runtime.
* Tie in CRD/code generation w/ existing scripts
* Mostly result of running update-fmt, updated file formatting
* Just a copyright fix
* All the test fixes
* Add changelog + some cleanup
* Update backup manifest
* Remove unneeded auto-generated files
* Keep everything in the same (existing) package
* Fix/clean scripts, generated code, and calls
  Deleting the entire `generated` directory and running `make update` works. Modifying an api and running `make verify` works as expected.
* Clean up schema and client calls + code reviews
* Move all code gen to inside builder container
* Address code review
* Fix imports/aliases
* More code reviews
* Add waitforcachesync
* Have manager register ALL controllers
  This will allow for proper cache management.
* Status subresource is now enabled; cleanup
* More code reviews
* Clean up
* Manager registers ALL controllers for restic too
* More code reviews
* Add deprecation warning/todo
* Add documentation
* Add helpful comments
* Address code review
* More idiomatic Runnable
* Clean up imports

Signed-off-by: Carlisia <carlisia@vmware.com>
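The heart of the change is a new factory method, f.KubebuilderClient(), used throughout the CLI in place of the generated clientset. Its definition is not part of the hunks shown below; here is a minimal sketch of what such a method needs to do (the receiver and field names are illustrative, not the commit's actual code):

    package client

    import (
        "k8s.io/apimachinery/pkg/runtime"
        kbclient "sigs.k8s.io/controller-runtime/pkg/client"

        velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    )

    func (f *factory) KubebuilderClient() (kbclient.Client, error) {
        clientConfig, err := f.ClientConfig()
        if err != nil {
            return nil, err
        }

        // Register the Velero API group so the generic client can map
        // Go types to GroupVersionKinds.
        scheme := runtime.NewScheme()
        if err := velerov1api.AddToScheme(scheme); err != nil {
            return nil, err
        }

        // Unlike the manager's client, a client built with kbclient.New
        // reads directly from the API server; it is not backed by a cache.
        return kbclient.New(clientConfig, kbclient.Options{Scheme: scheme})
    }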
@@ -17,9 +17,12 @@ limitations under the License.
 package backup
 
 import (
+	"context"
 	"fmt"
 	"time"
 
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/spf13/cobra"
 	"github.com/spf13/pflag"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -146,13 +149,22 @@ func (o *CreateOptions) Validate(c *cobra.Command, args []string, f client.Facto
 		return err
 	}
 
+	client, err := f.KubebuilderClient()
+	if err != nil {
+		return err
+	}
+
 	// Ensure that unless FromSchedule is set, args contains a backup name
 	if o.FromSchedule == "" && len(args) != 1 {
 		return fmt.Errorf("a backup name is required, unless you are creating based on a schedule")
 	}
 
 	if o.StorageLocation != "" {
-		if _, err := o.client.VeleroV1().BackupStorageLocations(f.Namespace()).Get(o.StorageLocation, metav1.GetOptions{}); err != nil {
+		location := &velerov1api.BackupStorageLocation{}
+		if err := client.Get(context.Background(), kbclient.ObjectKey{
+			Namespace: f.Namespace(),
+			Name:      o.StorageLocation,
+		}, location); err != nil {
 			return err
 		}
 	}
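For orientation, here is the new read pattern as a self-contained example. It uses the same calls as the hunk above; the namespace and name literals are placeholders:

    package main

    import (
        "context"
        "fmt"

        "k8s.io/apimachinery/pkg/runtime"
        ctrl "sigs.k8s.io/controller-runtime"
        kbclient "sigs.k8s.io/controller-runtime/pkg/client"

        velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
    )

    func main() {
        scheme := runtime.NewScheme()
        if err := velerov1api.AddToScheme(scheme); err != nil {
            panic(err)
        }

        cfg, err := ctrl.GetConfig() // in-cluster config or $KUBECONFIG
        if err != nil {
            panic(err)
        }
        c, err := kbclient.New(cfg, kbclient.Options{Scheme: scheme})
        if err != nil {
            panic(err)
        }

        // With the generic client the caller allocates the target object and
        // identifies it by ObjectKey; Get fills the struct in place. Compare
        // the clientset style it replaces, which returned a new object:
        //   veleroClient.VeleroV1().BackupStorageLocations(ns).Get(name, metav1.GetOptions{})
        location := &velerov1api.BackupStorageLocation{}
        if err := c.Get(context.Background(), kbclient.ObjectKey{
            Namespace: "velero", // placeholder namespace
            Name:      "default",
        }, location); err != nil {
            panic(err)
        }
        fmt.Println(location.Spec.Provider)
    }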
@@ -17,6 +17,7 @@ limitations under the License.
 package backuplocation
 
 import (
+	"context"
 	"fmt"
 	"strings"
 	"time"
@@ -26,6 +27,8 @@ import (
 	"github.com/spf13/pflag"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/client"
 	"github.com/vmware-tanzu/velero/pkg/cmd"
@@ -146,12 +149,12 @@ func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
 		return err
 	}
 
-	client, err := f.Client()
+	client, err := f.KubebuilderClient()
 	if err != nil {
 		return err
 	}
 
-	if _, err := client.VeleroV1().BackupStorageLocations(backupStorageLocation.Namespace).Create(backupStorageLocation); err != nil {
+	if err := client.Create(context.Background(), backupStorageLocation, &kbclient.CreateOptions{}); err != nil {
 		return errors.WithStack(err)
 	}
 
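One behavioral difference worth noting in the Create conversion: the clientset returned the server's copy of the object, while the controller-runtime client updates the passed object in place. A small sketch (the helper name is made up for illustration; imports are the same as in the hunk above):

    // createBSL is a hypothetical helper, not part of the commit.
    func createBSL(ctx context.Context, c kbclient.Client, bsl *velerov1api.BackupStorageLocation) error {
        // After a successful Create, bsl itself carries the server-assigned
        // fields (UID, resourceVersion, defaulted values); there is no
        // separate return object to capture. Options are variadic, so passing
        // &kbclient.CreateOptions{} as the diff does is equivalent to none.
        return c.Create(ctx, bsl)
    }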
@@ -17,10 +17,14 @@ limitations under the License.
 package backuplocation
 
 import (
+	"context"
+
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+
 	"github.com/spf13/cobra"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-	api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 	"github.com/vmware-tanzu/velero/pkg/client"
 	"github.com/vmware-tanzu/velero/pkg/cmd"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/output"
@@ -36,19 +40,24 @@ func NewGetCommand(f client.Factory, use string) *cobra.Command {
 			err := output.ValidateFlags(c)
 			cmd.CheckError(err)
 
-			veleroClient, err := f.Client()
+			client, err := f.KubebuilderClient()
 			cmd.CheckError(err)
 
-			var locations *api.BackupStorageLocationList
+			locations := new(velerov1api.BackupStorageLocationList)
 			if len(args) > 0 {
-				locations = new(api.BackupStorageLocationList)
+				location := &velerov1api.BackupStorageLocation{}
 				for _, name := range args {
-					location, err := veleroClient.VeleroV1().BackupStorageLocations(f.Namespace()).Get(name, metav1.GetOptions{})
+					err = client.Get(context.Background(), kbclient.ObjectKey{
+						Namespace: f.Namespace(),
+						Name:      name,
+					}, location)
 					cmd.CheckError(err)
 					locations.Items = append(locations.Items, *location)
 				}
 			} else {
-				locations, err = veleroClient.VeleroV1().BackupStorageLocations(f.Namespace()).List(listOptions)
+				err := client.List(context.Background(), locations, &kbclient.ListOptions{
+					Namespace: f.Namespace(),
+				})
 				cmd.CheckError(err)
 			}
 
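The List conversion above scopes results with a kbclient.ListOptions struct. controller-runtime also accepts the same setting as a functional option, which reads a bit tighter; both forms below are equivalent (a sketch, with a hypothetical helper name):

    // listBSLs is illustrative only; imports match the hunk above.
    func listBSLs(ctx context.Context, c kbclient.Client, ns string) (*velerov1api.BackupStorageLocationList, error) {
        locations := new(velerov1api.BackupStorageLocationList)

        // Form used in the diff: an options struct.
        if err := c.List(ctx, locations, &kbclient.ListOptions{Namespace: ns}); err != nil {
            return nil, err
        }

        // Equivalent functional option:
        //   err := c.List(ctx, locations, kbclient.InNamespace(ns))
        return locations, nil
    }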
@@ -20,19 +20,24 @@ import (
 	"fmt"
 	"os"
 	"strings"
 	"sync"
 
+	"github.com/vmware-tanzu/velero/internal/util/managercontroller"
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
+
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/spf13/cobra"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/sets"
 	kubeinformers "k8s.io/client-go/informers"
 	corev1informers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/cache"
 
+	ctrl "sigs.k8s.io/controller-runtime"
+
 	"github.com/vmware-tanzu/velero/pkg/buildinfo"
 	"github.com/vmware-tanzu/velero/pkg/client"
 	"github.com/vmware-tanzu/velero/pkg/cmd"
@@ -43,6 +48,13 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/restic"
 	"github.com/vmware-tanzu/velero/pkg/util/filesystem"
 	"github.com/vmware-tanzu/velero/pkg/util/logging"
+
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
 )
+
+var (
+	scheme = runtime.NewScheme()
+)
 
 func NewServerCommand(f client.Factory) *cobra.Command {
@@ -86,6 +98,7 @@ type resticServer struct {
 	ctx              context.Context
 	cancelFunc       context.CancelFunc
 	fileSystem       filesystem.Interface
+	mgr              manager.Manager
 }
 
 func newResticServer(logger logrus.FieldLogger, factory client.Factory) (*resticServer, error) {
@@ -130,6 +143,20 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory) (*restic
 
 	ctx, cancelFunc := context.WithCancel(context.Background())
 
+	clientConfig, err := factory.ClientConfig()
+	if err != nil {
+		return nil, err
+	}
+
+	ctrl.SetLogger(zap.New(zap.UseDevMode(true)))
+	velerov1api.AddToScheme(scheme)
+	mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		return nil, err
+	}
+
 	s := &resticServer{
 		kubeClient:   kubeClient,
 		veleroClient: veleroClient,
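Note that velerov1api.AddToScheme returns an error that the hunk above discards; the kubebuilder-generated registration can in principle fail, so a stricter variant would check it. A sketch, and explicitly not what the commit does (errors here is github.com/pkg/errors, already imported in this file):

    if err := velerov1api.AddToScheme(scheme); err != nil {
        return nil, errors.Wrap(err, "unable to register Velero APIs with the scheme")
    }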
@@ -141,6 +168,7 @@ func newResticServer(logger logrus.FieldLogger, factory client.Factory) (*restic
 		ctx:          ctx,
 		cancelFunc:   cancelFunc,
 		fileSystem:   filesystem.NewFileSystem(),
+		mgr:          mgr,
 	}
 
 	if err := s.validatePodVolumesHostPath(); err != nil {
@@ -155,8 +183,6 @@ func (s *resticServer) run() {
 
 	s.logger.Info("Starting controllers")
 
-	var wg sync.WaitGroup
-
 	backupController := controller.NewPodVolumeBackupController(
 		s.logger,
 		s.veleroInformerFactory.Velero().V1().PodVolumeBackups(),
@@ -165,14 +191,9 @@ func (s *resticServer) run() {
 		s.secretInformer,
 		s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
 		s.kubeInformerFactory.Core().V1().PersistentVolumes(),
-		s.veleroInformerFactory.Velero().V1().BackupStorageLocations(),
+		s.mgr.GetClient(),
 		os.Getenv("NODE_NAME"),
 	)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		backupController.Run(s.ctx, 1)
-	}()
 
 	restoreController := controller.NewPodVolumeRestoreController(
 		s.logger,
@@ -182,26 +203,30 @@ func (s *resticServer) run() {
 		s.secretInformer,
 		s.kubeInformerFactory.Core().V1().PersistentVolumeClaims(),
 		s.kubeInformerFactory.Core().V1().PersistentVolumes(),
-		s.veleroInformerFactory.Velero().V1().BackupStorageLocations(),
+		s.mgr.GetClient(),
 		os.Getenv("NODE_NAME"),
 	)
-	wg.Add(1)
-	go func() {
-		defer wg.Done()
-		restoreController.Run(s.ctx, 1)
-	}()
 
 	go s.veleroInformerFactory.Start(s.ctx.Done())
 	go s.kubeInformerFactory.Start(s.ctx.Done())
 	go s.podInformer.Run(s.ctx.Done())
 	go s.secretInformer.Run(s.ctx.Done())
 
-	s.logger.Info("Controllers started successfully")
-
-	<-s.ctx.Done()
-
-	s.logger.Info("Waiting for all controllers to shut down gracefully")
-	wg.Wait()
+	// TODO(2.0): presuming all controllers and resources are converted to runtime-controller
+	// by v2.0, the block from this line through the `s.mgr.Start()` call will be
+	// deprecated, since the manager auto-starts all the caches. Until then, we need to start the
+	// cache for them manually.
+
+	// Adding the controllers to the manager will register them as a (runtime-controller) runnable,
+	// so the manager will ensure the cache is started and ready before all controllers are started
+	s.mgr.Add(managercontroller.Runnable(backupController, 1))
+	s.mgr.Add(managercontroller.Runnable(restoreController, 1))
+
+	s.logger.Info("Controllers starting...")
+
+	if err := s.mgr.Start(ctrl.SetupSignalHandler()); err != nil {
+		s.logger.Fatal("Problem starting manager", err)
+	}
 }
 
 // validatePodVolumesHostPath validates that the pod volumes path contains a
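The managercontroller.Runnable helper used here is new in this commit, but its body is not part of the hunks shown. A plausible sketch, assuming Velero's legacy controller interface exposes Run(ctx, numWorkers) and given that controller-runtime at this version starts Runnables with a stop channel:

    package managercontroller

    import (
        "context"

        "sigs.k8s.io/controller-runtime/pkg/manager"

        "github.com/vmware-tanzu/velero/pkg/controller"
    )

    // Runnable adapts a legacy Velero controller to the manager.Runnable
    // interface, so the manager starts it only once the shared cache is ready.
    func Runnable(p controller.Interface, numWorkers int) manager.Runnable {
        f := func(stop <-chan struct{}) error {
            ctx, cancel := context.WithCancel(context.Background())
            defer cancel()
            go func() {
                // Translate the manager's stop channel into context
                // cancellation for the legacy controller.
                <-stop
                cancel()
            }()
            return p.Run(ctx, numWorkers)
        }
        return manager.RunnableFunc(f)
    }

This is what makes "Have manager register ALL controllers" from the commit message work: every controller, converted or not, is driven through the manager's lifecycle.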
@@ -25,7 +25,6 @@ import (
 	"os"
 	"reflect"
 	"strings"
-	"sync"
 	"time"
 
 	"github.com/pkg/errors"
@@ -34,6 +33,7 @@ import (
 	"github.com/spf13/cobra"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	kubeerrs "k8s.io/apimachinery/pkg/util/errors"
 	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -56,6 +56,7 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/cmd"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
 	"github.com/vmware-tanzu/velero/pkg/cmd/util/signals"
+
 	"github.com/vmware-tanzu/velero/pkg/controller"
 	velerodiscovery "github.com/vmware-tanzu/velero/pkg/discovery"
 	"github.com/vmware-tanzu/velero/pkg/features"
@@ -68,6 +69,14 @@ import (
 	"github.com/vmware-tanzu/velero/pkg/restic"
 	"github.com/vmware-tanzu/velero/pkg/restore"
 	"github.com/vmware-tanzu/velero/pkg/util/logging"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+
+	kbclient "sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/manager"
+
+	"github.com/vmware-tanzu/velero/internal/util/managercontroller"
+	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
 )
 
 const (
@@ -240,6 +249,7 @@ type server struct {
 	resticManager         restic.RepositoryManager
 	metrics               *metrics.ServerMetrics
 	config                serverConfig
+	mgr                   manager.Manager
 }
 
 func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*server, error) {
@@ -294,6 +304,16 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
 		}
 	}
 
+	scheme := runtime.NewScheme()
+	velerov1api.AddToScheme(scheme)
+	mgr, err := ctrl.NewManager(clientConfig, ctrl.Options{
+		Scheme: scheme,
+	})
+	if err != nil {
+		cancelFunc()
+		return nil, err
+	}
+
 	s := &server{
 		namespace:             f.Namespace(),
 		metricsAddress:        config.metricsAddress,
@@ -311,6 +331,7 @@ func newServer(f client.Factory, config serverConfig, logger *logrus.Logger) (*s
 		logLevel:              logger.Level,
 		pluginRegistry:        pluginRegistry,
 		config:                config,
+		mgr:                   mgr,
 	}
 
 	return s, nil
@@ -342,7 +363,13 @@ func (s *server) run() error {
 		return err
 	}
 
-	if _, err := s.veleroClient.VeleroV1().BackupStorageLocations(s.namespace).Get(s.config.defaultBackupLocation, metav1.GetOptions{}); err != nil {
+	// Fetching from the server directly since at this point
+	// the cache has not yet started
+	bsl := &velerov1api.BackupStorageLocation{}
+	if err := s.mgr.GetAPIReader().Get(context.Background(), kbclient.ObjectKey{
+		Namespace: s.namespace,
+		Name:      s.config.defaultBackupLocation,
+	}, bsl); err != nil {
 		s.logger.WithError(errors.WithStack(err)).
 			Warnf("A backup storage location named %s has been specified for the server to use by default, but no corresponding backup storage location exists. Backups with a location not matching the default will need to explicitly specify an existing location", s.config.defaultBackupLocation)
 	}
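The GetAPIReader call here is the trick called out in the commit message: the manager's default client reads from an informer cache that only starts inside mgr.Start, so cache-backed reads made before that point would fail. Contrasting the two read paths (a sketch with a hypothetical helper name):

    // readBSL contrasts the two read paths; not part of the commit.
    func readBSL(ctx context.Context, mgr manager.Manager, key kbclient.ObjectKey) (*velerov1api.BackupStorageLocation, error) {
        bsl := &velerov1api.BackupStorageLocation{}

        // Before mgr.Start has run: read straight from the API server.
        if err := mgr.GetAPIReader().Get(ctx, key, bsl); err != nil {
            return nil, err
        }

        // Once the manager is running and its cache has synced, the
        // cache-backed client is the cheaper choice:
        //   err := mgr.GetClient().Get(ctx, key, bsl)
        return bsl, nil
    }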
@@ -442,8 +469,12 @@ func (s *server) validateBackupStorageLocations() error {
 	pluginManager := clientmgmt.NewManager(s.logger, s.logLevel, s.pluginRegistry)
 	defer pluginManager.CleanupClients()
 
-	locations, err := s.veleroClient.VeleroV1().BackupStorageLocations(s.namespace).List(metav1.ListOptions{})
-	if err != nil {
+	// Fetching from the server directly since at this point
+	// the cache has not yet started
+	locations := &velerov1api.BackupStorageLocationList{}
+	if err := s.mgr.GetAPIReader().List(context.Background(), locations, &kbclient.ListOptions{
+		Namespace: s.namespace,
+	}); err != nil {
 		return errors.WithStack(err)
 	}
 
@@ -543,7 +574,7 @@ func (s *server) initRestic() error {
 		secretsInformer,
 		s.sharedInformerFactory.Velero().V1().ResticRepositories(),
 		s.veleroClient.VeleroV1(),
-		s.sharedInformerFactory.Velero().V1().BackupStorageLocations(),
+		s.mgr.GetClient(),
 		s.kubeClient.CoreV1(),
 		s.kubeClient.CoreV1(),
 		s.logger,
@@ -588,7 +619,6 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 	s.logger.Info("Starting controllers")
 
 	ctx := s.ctx
-	var wg sync.WaitGroup
 
 	go func() {
 		metricsMux := http.NewServeMux()
@@ -611,10 +641,9 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 	backupSyncControllerRunInfo := func() controllerRunInfo {
 		backupSyncContoller := controller.NewBackupSyncController(
 			s.veleroClient.VeleroV1(),
-			s.veleroClient.VeleroV1(),
+			s.mgr.GetClient(),
 			s.veleroClient.VeleroV1(),
 			s.sharedInformerFactory.Velero().V1().Backups().Lister(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
 			s.config.backupSyncPeriod,
 			s.namespace,
 			s.csiSnapshotClient,
@@ -653,7 +682,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			s.logLevel,
 			newPluginManager,
 			backupTracker,
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 			s.config.defaultBackupLocation,
 			s.config.defaultVolumesToRestic,
 			s.config.defaultBackupTTL,
@@ -693,7 +722,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			s.sharedInformerFactory.Velero().V1().Backups(),
 			s.sharedInformerFactory.Velero().V1().DeleteBackupRequests().Lister(),
 			s.veleroClient.VeleroV1(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 		)
 
 		return controllerRunInfo{
@@ -713,7 +742,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			backupTracker,
 			s.resticManager,
 			s.sharedInformerFactory.Velero().V1().PodVolumeBackups().Lister(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 			s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(),
 			csiVSLister,
 			csiVSCLister,
@@ -748,7 +777,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			s.veleroClient.VeleroV1(),
 			restorer,
 			s.sharedInformerFactory.Velero().V1().Backups().Lister(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 			s.sharedInformerFactory.Velero().V1().VolumeSnapshotLocations().Lister(),
 			s.logger,
 			s.logLevel,
@@ -769,7 +798,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			s.logger,
 			s.sharedInformerFactory.Velero().V1().ResticRepositories(),
 			s.veleroClient.VeleroV1(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 			s.resticManager,
 			s.config.defaultResticMaintenanceFrequency,
 		)
@@ -785,7 +814,7 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 			s.veleroClient.VeleroV1(),
 			s.sharedInformerFactory.Velero().V1().DownloadRequests(),
 			s.sharedInformerFactory.Velero().V1().Restores().Lister(),
-			s.sharedInformerFactory.Velero().V1().BackupStorageLocations().Lister(),
+			s.mgr.GetClient(),
 			s.sharedInformerFactory.Velero().V1().Backups().Lister(),
 			newPluginManager,
 			s.logger,
@@ -872,23 +901,22 @@ func (s *server) runControllers(defaultVolumeSnapshotLocations map[string]string
 		s.logger.WithField("informer", informer).Info("Informer cache synced")
 	}
 
-	// now that the informer caches have all synced, we can start running the controllers
+	// TODO(2.0): presuming all controllers and resources are converted to runtime-controller
+	// by v2.0, the block from this line through the `s.mgr.Start()` call will be
+	// deprecated, since the manager auto-starts all the caches. Until then, we need to start the
+	// cache for them manually.
 	for i := range controllers {
 		controllerRunInfo := controllers[i]
-
-		wg.Add(1)
-		go func() {
-			controllerRunInfo.controller.Run(ctx, controllerRunInfo.numWorkers)
-			wg.Done()
-		}()
+		// Adding the controllers to the manager will register them as a (runtime-controller) runnable,
+		// so the manager will ensure the cache is started and ready before all controllers are started
+		s.mgr.Add(managercontroller.Runnable(controllerRunInfo.controller, controllerRunInfo.numWorkers))
 	}
 
-	s.logger.Info("Server started successfully")
-
-	<-ctx.Done()
-
-	s.logger.Info("Waiting for all controllers to shut down gracefully")
-	wg.Wait()
+	s.logger.Info("Server starting...")
+
+	if err := s.mgr.Start(s.ctx.Done()); err != nil {
+		s.logger.Fatal("Problem starting manager", err)
+	}
 
 	return nil
 }
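mgr.Start replaces the old WaitGroup dance entirely: it blocks until the stop channel closes, starts every registered Runnable only after the manager's cache has synced, and tears them all down together on shutdown. At this controller-runtime version the stop-channel idiom looks like this (a fragment using names from the hunks above):

    // Restic server: the stop channel comes from the signal handler,
    // which closes on SIGTERM/SIGINT.
    stopCh := ctrl.SetupSignalHandler()
    // Velero server equivalent: stopCh := s.ctx.Done()

    if err := mgr.Start(stopCh); err != nil {
        // Start only returns after shutdown (or a startup failure); every
        // registered Runnable has been stopped by the time we get here.
        logger.Fatal("Problem starting manager", err)
    }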
@@ -30,7 +30,7 @@ var (
 		{Name: "Name", Type: "string", Format: "name"},
 		{Name: "Provider"},
 		{Name: "Bucket/Prefix"},
-		{Name: "Status"},
+		{Name: "Phase"},
 		{Name: "Access Mode"},
 	}
 )