Initial commit

Signed-off-by: Andy Goldstein <andy.goldstein@gmail.com>
Author: Andy Goldstein
Date:   2017-08-02 13:27:17 -04:00
Commit: 2fe501f527

2024 changed files with 948,288 additions and 0 deletions

pkg/apis/ark/v1/backup.go (new file, 134 lines)

@@ -0,0 +1,134 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// BackupSpec defines the specification for an Ark backup.
type BackupSpec struct {
// IncludedNamespaces is a slice of namespace names to include objects
// from. If empty, all namespaces are included.
IncludedNamespaces []string `json:"includedNamespaces"`
// ExcludedNamespaces contains a list of namespaces that are not
// included in the backup.
ExcludedNamespaces []string `json:"excludedNamespaces"`
// IncludedResources is a slice of resource names to include
// in the backup. If empty, all resources are included.
IncludedResources []string `json:"includedResources"`
// ExcludedResources is a slice of resource names that are not
// included in the backup.
ExcludedResources []string `json:"excludedResources"`
// LabelSelector is a metav1.LabelSelector to filter with
// when adding individual objects to the backup. If empty
// or nil, all objects are included. Optional.
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
// SnapshotVolumes is a bool which specifies whether to take
// cloud snapshots of any PVs referenced in the set of objects
// included in the Backup.
SnapshotVolumes bool `json:"snapshotVolumes"`
// TTL is a time.Duration-parseable string describing how long
// the Backup should be retained for.
TTL metav1.Duration `json:"ttl"`
}
// BackupPhase is a string representation of the lifecycle phase
// of an Ark backup.
type BackupPhase string
const (
// BackupPhaseNew means the backup has been created but not
// yet processed by the BackupController.
BackupPhaseNew BackupPhase = "New"
// BackupPhaseFailedValidation means the backup has failed
// the controller's validations and therefore will not run.
BackupPhaseFailedValidation BackupPhase = "FailedValidation"
// BackupPhaseInProgress means the backup is currently executing.
BackupPhaseInProgress BackupPhase = "InProgress"
// BackupPhaseCompleted means the backup has run successfully without
// errors.
BackupPhaseCompleted BackupPhase = "Completed"
// BackupPhaseFailed means the backup ran but encountered an error that
// prevented it from completing successfully.
BackupPhaseFailed BackupPhase = "Failed"
)
// BackupStatus captures the current status of an Ark backup.
type BackupStatus struct {
// Version is the backup format version.
Version int `json:"version"`
// Expiration is when this Backup is eligible for garbage-collection.
Expiration metav1.Time `json:"expiration"`
// Phase is the current state of the Backup.
Phase BackupPhase `json:"phase"`
// VolumeBackups is a map of PersistentVolume names to
// information about the backed-up volume in the cloud
// provider API.
VolumeBackups map[string]*VolumeBackupInfo `json:"volumeBackups"`
// ValidationErrors is a slice of all validation errors (if
// applicable).
ValidationErrors []string `json:"validationErrors"`
}
// VolumeBackupInfo captures the required information about
// a PersistentVolume at backup time to be able to restore
// it later.
type VolumeBackupInfo struct {
// SnapshotID is the ID of the snapshot taken in the cloud
// provider API of this volume.
SnapshotID string `json:"snapshotID"`
// Type is the type of the disk/volume in the cloud provider
// API.
Type string `json:"type"`
// Iops is the optional value of provisioned IOPS for the
// disk/volume in the cloud provider API.
Iops *int `json:"iops"`
}
// +genclient=true
// Backup is an Ark resource that represents the capture of Kubernetes
// cluster state at a point in time (API objects and associated volume state).
type Backup struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec BackupSpec `json:"spec"`
Status BackupStatus `json:"status,omitempty"`
}
// BackupList is a list of Backups.
type BackupList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Backup `json:"items"`
}
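
Usage sketch (not part of this commit; the namespace, name, and TTL values are hypothetical): constructing a Backup object in client code.

    import (
        "time"

        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

        arkv1 "github.com/heptio/ark/pkg/apis/ark/v1"
    )

    backup := &arkv1.Backup{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: arkv1.DefaultNamespace,
            Name:      "nginx-backup",
        },
        Spec: arkv1.BackupSpec{
            IncludedNamespaces: []string{"nginx-example"},
            SnapshotVolumes:    true,
            TTL:                metav1.Duration{Duration: 24 * time.Hour},
        },
    }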

pkg/apis/ark/v1/config.go (new file, 113 lines)

@@ -0,0 +1,113 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// ConfigList is a list of Configs.
type ConfigList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Config `json:"items"`
}
// +genclient=true
// Config is an Ark resource that captures configuration information to be
// used for running the Ark server.
type Config struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
// PersistentVolumeProvider is the configuration information for the cloud where
// the cluster is running and has PersistentVolumes to snapshot or restore.
PersistentVolumeProvider CloudProviderConfig `json:"persistentVolumeProvider"`
// BackupStorageProvider is the configuration information for the cloud where
// Ark backups are stored in object storage. This may be a different cloud than
// where the cluster is running.
BackupStorageProvider ObjectStorageProviderConfig `json:"backupStorageProvider"`
// BackupSyncPeriod is how often the BackupSyncController runs to ensure all
// Ark backups in object storage exist as Backup API objects in the cluster.
BackupSyncPeriod metav1.Duration `json:"backupSyncPeriod"`
// GCSyncPeriod is how often the GCController runs to delete expired backup
// API objects and corresponding backup files in object storage.
GCSyncPeriod metav1.Duration `json:"gcSyncPeriod"`
// ScheduleSyncPeriod is how often the ScheduleController runs to check for
// new backups that should be triggered based on schedules.
ScheduleSyncPeriod metav1.Duration `json:"scheduleSyncPeriod"`
// ResourcePriorities is an ordered slice of resources specifying the desired
// order of resource restores. Any resources not in the list will be restored
// alphabetically after the prioritized resources.
ResourcePriorities []string `json:"resourcePriorities"`
// RestoreOnlyMode is whether Ark should run in a mode where only restores
// are allowed; backups, schedules, and garbage-collection are all disabled.
RestoreOnlyMode bool `json:"restoreOnlyMode"`
}
// CloudProviderConfig is configuration information about how to connect
// to a particular cloud. Only one of the members (AWS, GCP, Azure) may
// be present.
type CloudProviderConfig struct {
// AWS is configuration information for connecting to AWS.
AWS *AWSConfig `json:"aws"`
// GCP is configuration information for connecting to GCP.
GCP *GCPConfig `json:"gcp"`
// Azure is configuration information for connecting to Azure.
Azure *AzureConfig `json:"azure"`
}
// ObjectStorageProviderConfig is configuration information for connecting to
// a particular bucket in object storage to access Ark backups.
type ObjectStorageProviderConfig struct {
// CloudProviderConfig is the configuration information for the cloud where
// Ark backups are stored in object storage.
CloudProviderConfig `json:",inline"`
// Bucket is the name of the bucket in object storage where Ark backups
// are stored.
Bucket string `json:"bucket"`
}
// AWSConfig is configuration information for connecting to AWS.
type AWSConfig struct {
// Region is the AWS region where volumes and snapshots are located.
Region string `json:"region"`
// AvailabilityZone is the AWS availability zone within Region.
AvailabilityZone string `json:"availabilityZone"`
// DisableSSL disables SSL when talking to S3 (useful for S3-compatible endpoints).
DisableSSL bool `json:"disableSSL"`
// S3ForcePathStyle forces path-style S3 URLs (bucket in the path rather than the host).
S3ForcePathStyle bool `json:"s3ForcePathStyle"`
// S3Url overrides the S3 endpoint URL (for S3-compatible object storage).
S3Url string `json:"s3Url"`
}
// GCPConfig is configuration information for connecting to GCP.
type GCPConfig struct {
// Project is the GCP project ID.
Project string `json:"project"`
// Zone is the GCP zone where disks and snapshots are located.
Zone string `json:"zone"`
}
// AzureConfig is configuration information for connecting to Azure.
type AzureConfig struct {
// Location is the Azure location (region) for disks and snapshots.
Location string `json:"location"`
// APITimeout is how long to wait for asynchronous Azure API calls to complete.
APITimeout metav1.Duration `json:"apiTimeout"`
}
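
Usage sketch (not part of this commit; region, bucket, and sync periods are hypothetical): a Config wired for AWS for both volume snapshots and backup storage.

    cfg := &arkv1.Config{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: arkv1.DefaultNamespace,
            Name:      "default",
        },
        PersistentVolumeProvider: arkv1.CloudProviderConfig{
            AWS: &arkv1.AWSConfig{Region: "us-west-2", AvailabilityZone: "us-west-2a"},
        },
        BackupStorageProvider: arkv1.ObjectStorageProviderConfig{
            CloudProviderConfig: arkv1.CloudProviderConfig{
                AWS: &arkv1.AWSConfig{Region: "us-west-2"},
            },
            Bucket: "ark-backups",
        },
        BackupSyncPeriod:   metav1.Duration{Duration: time.Minute},
        GCSyncPeriod:       metav1.Duration{Duration: time.Minute},
        ScheduleSyncPeriod: metav1.Duration{Duration: time.Minute},
    }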


@@ -0,0 +1,36 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
const (
// DefaultNamespace is the Kubernetes namespace that is used by default for
// the Ark server and API objects.
DefaultNamespace = "heptio-ark"
// RestoreLabelKey is the label key that's applied to all resources that
// are created during a restore. This is applied for ease of identification
// of restored resources. The value will be the restore's name.
RestoreLabelKey = "ark-restore"
// ClusterScopedDir is the name of the directory containing cluster-scoped
// resources within an Ark backup.
ClusterScopedDir = "cluster"
// NamespaceScopedDir is the name of the directory containing namespace-scoped
// resources within an Ark backup.
NamespaceScopedDir = "namespaces"
)

pkg/apis/ark/v1/doc.go (new file, 19 lines)

@@ -0,0 +1,19 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package v1 is the v1 version of the API.
// +groupName=ark.heptio.com
package v1


@@ -0,0 +1,57 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
// SchemeBuilder collects the scheme builder functions for the Ark API
SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes)
// AddToScheme applies the SchemeBuilder functions to a specified scheme
AddToScheme = SchemeBuilder.AddToScheme
)
// GroupName is the group name for the Ark API
const GroupName = "ark.heptio.com"
// SchemeGroupVersion is the GroupVersion for the Ark API
var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1"}
// Resource gets an Ark GroupResource for a specified resource
func Resource(resource string) schema.GroupResource {
return SchemeGroupVersion.WithResource(resource).GroupResource()
}
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Backup{},
&BackupList{},
&Schedule{},
&ScheduleList{},
&Restore{},
&RestoreList{},
&Config{},
&ConfigList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil
}
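
Usage sketch (not part of this commit): registering the Ark types with a fresh runtime.Scheme, as a client or codec factory would.

    scheme := runtime.NewScheme()
    if err := v1.AddToScheme(scheme); err != nil {
        panic(err) // registration only fails on programmer error
    }
    // scheme now knows Backup, Restore, Schedule, Config and their List
    // types under the ark.heptio.com/v1 group-version.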

pkg/apis/ark/v1/restore.go (new file, 120 lines)

@@ -0,0 +1,120 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// RestoreSpec defines the specification for an Ark restore.
type RestoreSpec struct {
// BackupName is the unique name of the Ark backup to restore
// from.
BackupName string `json:"backupName"`
// Namespaces is a slice of namespaces in the Ark backup to restore.
Namespaces []string `json:"namespaces"`
// NamespaceMapping is a map of source namespace names
// to target namespace names to restore into. Any source
// namespaces not included in the map will be restored into
// namespaces of the same name.
NamespaceMapping map[string]string `json:"namespaceMapping"`
// LabelSelector is a metav1.LabelSelector to filter with
// when restoring individual objects from the backup. If empty
// or nil, all objects are included. Optional.
LabelSelector *metav1.LabelSelector `json:"labelSelector"`
// RestorePVs is a bool defining whether to restore all included
// PVs from snapshot (via the cloud provider). Default false.
RestorePVs bool `json:"restorePVs"`
}
// RestorePhase is a string representation of the lifecycle phase
// of an Ark restore
type RestorePhase string
const (
// RestorePhaseNew means the restore has been created but not
// yet processed by the RestoreController
RestorePhaseNew RestorePhase = "New"
// RestorePhaseFailedValidation means the restore has failed
// the controller's validations and therefore will not run.
RestorePhaseFailedValidation RestorePhase = "FailedValidation"
// RestorePhaseInProgress means the restore is currently executing.
RestorePhaseInProgress RestorePhase = "InProgress"
// RestorePhaseCompleted means the restore has finished executing.
// Any relevant warnings or errors will be captured in the Status.
RestorePhaseCompleted RestorePhase = "Completed"
)
// RestoreStatus captures the current status of an Ark restore
type RestoreStatus struct {
// Phase is the current state of the Restore
Phase RestorePhase `json:"phase"`
// ValidationErrors is a slice of all validation errors (if
// applicable)
ValidationErrors []string `json:"validationErrors"`
// Warnings is a collection of all warning messages that were
// generated during execution of the restore
Warnings RestoreResult `json:"warnings"`
// Errors is a collection of all error messages that were
// generated during execution of the restore
Errors RestoreResult `json:"errors"`
}
// RestoreResult is a collection of messages that were generated
// during execution of a restore. This will typically store either
// warning or error messages.
type RestoreResult struct {
// Ark is a slice of messages related to the operation of Ark
// itself (for example, messages related to connecting to the
// cloud, reading a backup file, etc.)
Ark []string `json:"ark"`
// Cluster is a slice of messages related to restoring cluster-
// scoped resources.
Cluster []string `json:"cluster"`
// Namespaces is a map of namespace name to slice of messages
// related to restoring namespace-scoped resources.
Namespaces map[string][]string `json:"namespaces"`
}
// +genclient=true
// Restore is an Ark resource that represents the application of
// resources from an Ark backup to a target Kubernetes cluster.
type Restore struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec RestoreSpec `json:"spec"`
Status RestoreStatus `json:"status,omitempty"`
}
// RestoreList is a list of Restores.
type RestoreList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Restore `json:"items"`
}
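
Usage sketch (not part of this commit; all names are hypothetical): a Restore that pulls one namespace out of a backup and remaps it.

    restore := &arkv1.Restore{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: arkv1.DefaultNamespace,
            Name:      "nginx-restore",
        },
        Spec: arkv1.RestoreSpec{
            BackupName:       "nginx-backup",
            Namespaces:       []string{"nginx-example"},
            NamespaceMapping: map[string]string{"nginx-example": "nginx-restored"},
            RestorePVs:       true,
        },
    }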


@@ -0,0 +1,81 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1
import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// ScheduleSpec defines the specification for an Ark schedule
type ScheduleSpec struct {
// Template is the definition of the Backup to be run
// on the provided schedule
Template BackupSpec `json:"template"`
// Schedule is a Cron expression defining when to run
// the Backup.
Schedule string `json:"schedule"`
}
// SchedulePhase is a string representation of the lifecycle phase
// of an Ark schedule
type SchedulePhase string
const (
// SchedulePhaseNew means the schedule has been created but not
// yet processed by the ScheduleController
SchedulePhaseNew SchedulePhase = "New"
// SchedulePhaseEnabled means the schedule has been validated and
// will now be triggering backups according to the schedule spec.
SchedulePhaseEnabled SchedulePhase = "Enabled"
// SchedulePhaseFailedValidation means the schedule has failed
// the controller's validations and therefore will not trigger backups.
SchedulePhaseFailedValidation SchedulePhase = "FailedValidation"
)
// ScheduleStatus captures the current state of an Ark schedule
type ScheduleStatus struct {
// Phase is the current phase of the Schedule
Phase SchedulePhase `json:"phase"`
// LastBackup is the last time a Backup was run for this
// Schedule.
LastBackup metav1.Time `json:"lastBackup"`
// ValidationErrors is a slice of all validation errors (if
// applicable)
ValidationErrors []string `json:"validationErrors"`
}
// +genclient=true
// Schedule is an Ark resource that represents a pre-scheduled or
// periodic Backup that should be run.
type Schedule struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata"`
Spec ScheduleSpec `json:"spec"`
Status ScheduleStatus `json:"status,omitempty"`
}
// ScheduleList is a list of Schedules.
type ScheduleList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []Schedule `json:"items"`
}
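
Usage sketch (not part of this commit; the cron expression and TTL are hypothetical): a Schedule that runs the templated Backup daily.

    schedule := &arkv1.Schedule{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: arkv1.DefaultNamespace,
            Name:      "daily-backup",
        },
        Spec: arkv1.ScheduleSpec{
            Schedule: "0 2 * * *", // every day at 02:00
            Template: arkv1.BackupSpec{
                IncludedNamespaces: []string{"*"},
                TTL:                metav1.Duration{Duration: 48 * time.Hour},
            },
        },
    }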

pkg/backup/backup.go (new file, 401 lines)

@@ -0,0 +1,401 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"archive/tar"
"compress/gzip"
"encoding/json"
"fmt"
"io"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/util/collections"
)
// Backupper performs backups.
type Backupper interface {
// Backup takes a backup using the specification in the api.Backup and writes backup data to the
// given writer.
Backup(backup *api.Backup, data io.Writer) error
}
// kubernetesBackupper implements Backupper.
type kubernetesBackupper struct {
dynamicFactory client.DynamicFactory
discoveryHelper discovery.Helper
actions map[schema.GroupResource]Action
itemBackupper itemBackupper
}
var _ Backupper = &kubernetesBackupper{}
// Action is an actor that performs an operation on an individual item being backed up.
type Action interface {
// Execute is invoked on an item being backed up. If an error is returned, the Backup is marked as
// failed.
Execute(item map[string]interface{}, backup *api.Backup) error
}
// NewKubernetesBackupper creates a new kubernetesBackupper.
func NewKubernetesBackupper(
discoveryHelper discovery.Helper,
dynamicFactory client.DynamicFactory,
actions map[string]Action,
) (Backupper, error) {
resolvedActions, err := resolveActions(discoveryHelper.Mapper(), actions)
if err != nil {
return nil, err
}
return &kubernetesBackupper{
discoveryHelper: discoveryHelper,
dynamicFactory: dynamicFactory,
actions: resolvedActions,
itemBackupper: &realItemBackupper{},
}, nil
}
// resolveActions resolves the string-based map of group-resources to actions and returns a map of
// schema.GroupResources to actions.
func resolveActions(mapper meta.RESTMapper, actions map[string]Action) (map[schema.GroupResource]Action, error) {
ret := make(map[schema.GroupResource]Action)
for resource, action := range actions {
gr, err := resolveGroupResource(mapper, resource)
if err != nil {
return nil, err
}
ret[gr] = action
}
return ret, nil
}
// getResourceIncludesExcludes takes the lists of resources to include and exclude from the
// backup, uses the RESTMapper to resolve them to fully-qualified group-resource names, and returns
// an IncludesExcludes list.
func getResourceIncludesExcludes(mapper meta.RESTMapper, backup *api.Backup) *collections.IncludesExcludes {
resources := collections.NewIncludesExcludes()
resolve := func(list []string, allowAll bool, f func(string)) {
for _, resource := range list {
if allowAll && resource == "*" {
f("*")
return
}
gr, err := resolveGroupResource(mapper, resource)
if err != nil {
glog.Errorf("unable to include resource %q in backup: %v", resource, err)
continue
}
f(gr.String())
}
}
resolve(backup.Spec.IncludedResources, true, func(s string) { resources.Includes(s) })
resolve(backup.Spec.ExcludedResources, false, func(s string) { resources.Excludes(s) })
return resources
}
// resolveGroupResource uses the RESTMapper to resolve resource to a fully-qualified
// schema.GroupResource. If the RESTMapper is unable to do so, an error is returned instead.
func resolveGroupResource(mapper meta.RESTMapper, resource string) (schema.GroupResource, error) {
gvr, err := mapper.ResourceFor(schema.ParseGroupResource(resource).WithVersion(""))
if err != nil {
return schema.GroupResource{}, err
}
return gvr.GroupResource(), nil
}
// getNamespaceIncludesExcludes returns an IncludesExcludes list containing which namespaces to
// include and exclude from the backup.
func getNamespaceIncludesExcludes(backup *api.Backup) *collections.IncludesExcludes {
return collections.NewIncludesExcludes().Includes(backup.Spec.IncludedNamespaces...).Excludes(backup.Spec.ExcludedNamespaces...)
}
type backupContext struct {
backup *api.Backup
w tarWriter
namespaceIncludesExcludes *collections.IncludesExcludes
resourceIncludesExcludes *collections.IncludesExcludes
// deploymentsBackedUp marks whether we've seen and are backing up the deployments resource, from
// either the apps or extensions api groups. We only want to back them up once, from whichever api
// group we see first.
deploymentsBackedUp bool
// networkPoliciesBackedUp marks whether we've seen and are backing up the networkpolicies
// resource, from either the networking.k8s.io or extensions api groups. We only want to back them
// up once, from whichever api group we see first.
networkPoliciesBackedUp bool
}
// Backup backs up the items specified in the Backup, placing them in a gzip-compressed tar file
// written to data. State accumulated during the backup (for example, volume snapshot info
// recorded by actions) is written directly into the passed-in api.Backup.
func (kb *kubernetesBackupper) Backup(backup *api.Backup, data io.Writer) error {
gzw := gzip.NewWriter(data)
defer gzw.Close()
tw := tar.NewWriter(gzw)
defer tw.Close()
var errs []error
ctx := &backupContext{
backup: backup,
w: tw,
namespaceIncludesExcludes: getNamespaceIncludesExcludes(backup),
resourceIncludesExcludes: getResourceIncludesExcludes(kb.discoveryHelper.Mapper(), backup),
}
for _, group := range kb.discoveryHelper.Resources() {
glog.V(2).Infof("Backing up group %q\n", group.GroupVersion)
if err := kb.backupGroup(ctx, group); err != nil {
errs = append(errs, err)
}
}
return kuberrs.NewAggregate(errs)
}
type tarWriter interface {
io.Closer
Write([]byte) (int, error)
WriteHeader(*tar.Header) error
}
// backupGroup backs up a single API group.
func (kb *kubernetesBackupper) backupGroup(ctx *backupContext, group *metav1.APIResourceList) error {
var errs []error
for _, resource := range group.APIResources {
glog.V(2).Infof("Backing up resource %s/%s\n", group.GroupVersion, resource.Name)
if err := kb.backupResource(ctx, group, resource); err != nil {
errs = append(errs, err)
}
}
return kuberrs.NewAggregate(errs)
}
const (
appsDeploymentsResource = "deployments.apps"
extensionsDeploymentsResource = "deployments.extensions"
networkingNetworkPoliciesResource = "networkpolicies.networking.k8s.io"
extensionsNetworkPoliciesResource = "networkpolicies.extensions"
)
// backupResource backs up all the objects for a given group-version-resource.
func (kb *kubernetesBackupper) backupResource(
ctx *backupContext,
group *metav1.APIResourceList,
resource metav1.APIResource,
) error {
var errs []error
gv, err := schema.ParseGroupVersion(group.GroupVersion)
if err != nil {
return err
}
gvr := schema.GroupVersionResource{Group: gv.Group, Version: gv.Version}
gr := schema.GroupResource{Group: gv.Group, Resource: resource.Name}
grString := gr.String()
if !ctx.resourceIncludesExcludes.ShouldInclude(grString) {
glog.V(2).Infof("Not including resource %s\n", grString)
return nil
}
if grString == appsDeploymentsResource || grString == extensionsDeploymentsResource {
if ctx.deploymentsBackedUp {
var other string
if grString == appsDeploymentsResource {
other = extensionsDeploymentsResource
} else {
other = appsDeploymentsResource
}
glog.V(4).Infof("Skipping resource %q because it's a duplicate of %q", grString, other)
return nil
}
ctx.deploymentsBackedUp = true
}
if grString == networkingNetworkPoliciesResource || grString == extensionsNetworkPoliciesResource {
if ctx.networkPoliciesBackedUp {
var other string
if grString == networkingNetworkPoliciesResource {
other = extensionsNetworkPoliciesResource
} else {
other = networkingNetworkPoliciesResource
}
glog.V(4).Infof("Skipping resource %q because it's a duplicate of %q", grString, other)
return nil
}
ctx.networkPoliciesBackedUp = true
}
var namespacesToList []string
if resource.Namespaced {
namespacesToList = getNamespacesToList(ctx.namespaceIncludesExcludes)
} else {
namespacesToList = []string{""}
}
for _, namespace := range namespacesToList {
resourceClient, err := kb.dynamicFactory.ClientForGroupVersionResource(gvr, resource, namespace)
if err != nil {
return err
}
labelSelector := ""
if ctx.backup.Spec.LabelSelector != nil {
labelSelector = metav1.FormatLabelSelector(ctx.backup.Spec.LabelSelector)
}
unstructuredList, err := resourceClient.List(metav1.ListOptions{LabelSelector: labelSelector})
if err != nil {
return err
}
// do the backup
items, err := meta.ExtractList(unstructuredList)
if err != nil {
return err
}
action := kb.actions[gr]
for _, item := range items {
unstructured, ok := item.(runtime.Unstructured)
if !ok {
errs = append(errs, fmt.Errorf("unexpected type %T", item))
continue
}
obj := unstructured.UnstructuredContent()
if err := kb.itemBackupper.backupItem(ctx, obj, grString, action); err != nil {
errs = append(errs, err)
}
}
}
return kuberrs.NewAggregate(errs)
}
// getNamespacesToList examines ie and resolves the includes and excludes to a full list of
// namespaces to list. If ie is nil or it includes *, the result is just "" (list across all
// namespaces). Otherwise, the result is a list of every included namespace minus all excluded ones.
func getNamespacesToList(ie *collections.IncludesExcludes) []string {
if ie == nil {
return []string{""}
}
if ie.ShouldInclude("*") {
// "" means all namespaces
return []string{""}
}
var list []string
for _, i := range ie.GetIncludes() {
if ie.ShouldInclude(i) {
list = append(list, i)
}
}
return list
}
type itemBackupper interface {
backupItem(ctx *backupContext, item map[string]interface{}, groupResource string, action Action) error
}
type realItemBackupper struct{}
// backupItem backs up an individual item to tarWriter. The item may be excluded based on the
// namespaces IncludesExcludes list.
func (*realItemBackupper) backupItem(ctx *backupContext, item map[string]interface{}, groupResource string, action Action) error {
// Never save status
delete(item, "status")
metadata, err := collections.GetMap(item, "metadata")
if err != nil {
return err
}
name, err := collections.GetString(metadata, "name")
if err != nil {
return err
}
namespace, err := collections.GetString(metadata, "namespace")
if err == nil {
if !ctx.namespaceIncludesExcludes.ShouldInclude(namespace) {
glog.V(2).Infof("Excluding item %s because namespace %s is excluded\n", name, namespace)
return nil
}
}
if action != nil {
glog.V(4).Infof("Executing action on %s, ns=%s, name=%s", groupResource, namespace, name)
action.Execute(item, ctx.backup)
}
glog.V(2).Infof("Backing up resource=%s, ns=%s, name=%s", groupResource, namespace, name)
var filePath string
if namespace != "" {
filePath = strings.Join([]string{api.NamespaceScopedDir, namespace, groupResource, name + ".json"}, "/")
} else {
filePath = strings.Join([]string{api.ClusterScopedDir, groupResource, name + ".json"}, "/")
}
itemBytes, err := json.Marshal(item)
if err != nil {
return err
}
hdr := &tar.Header{
Name: filePath,
Size: int64(len(itemBytes)),
Typeflag: tar.TypeReg,
Mode: 0755,
ModTime: time.Now(),
}
if err := ctx.w.WriteHeader(hdr); err != nil {
return err
}
if _, err := ctx.w.Write(itemBytes); err != nil {
return err
}
return nil
}
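
Usage sketch (not part of this commit; assumes a discovery.Helper and client.DynamicFactory are already wired up, and the output path is hypothetical): driving the Backupper end to end. In the resulting tarball, cluster-scoped items land at cluster/<groupResource>/<name>.json and namespaced items at namespaces/<namespace>/<groupResource>/<name>.json, per backupItem above.

    backupper, err := NewKubernetesBackupper(discoveryHelper, dynamicFactory, nil)
    if err != nil {
        return err
    }
    f, err := os.Create("/tmp/mybackup.tar.gz")
    if err != nil {
        return err
    }
    defer f.Close()
    if err := backupper.Backup(myBackup, f); err != nil {
        return err
    }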

pkg/backup/backup_test.go (new file, 1097 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,128 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"fmt"
"regexp"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/clock"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/util/collections"
)
// volumeSnapshotAction is a struct that knows how to take snapshots of PersistentVolumes
// that are backed by compatible cloud volumes.
type volumeSnapshotAction struct {
snapshotService cloudprovider.SnapshotService
clock clock.Clock
}
var _ Action = &volumeSnapshotAction{}
// NewVolumeSnapshotAction creates an Action that snapshots the cloud volumes backing PersistentVolumes.
func NewVolumeSnapshotAction(snapshotService cloudprovider.SnapshotService) Action {
return &volumeSnapshotAction{
snapshotService: snapshotService,
clock: clock.RealClock{},
}
}
// Execute triggers a snapshot for the volume/disk underlying a PersistentVolume if the provided
// backup has volume snapshots enabled and the PV is of a compatible type. Also records cloud
// disk type and IOPS (if applicable) to be able to restore to current state later.
func (a *volumeSnapshotAction) Execute(volume map[string]interface{}, backup *api.Backup) error {
backupName := fmt.Sprintf("%s/%s", backup.Namespace, backup.Name)
if !backup.Spec.SnapshotVolumes {
glog.V(2).Infof("Backup %q has volume snapshots disabled; skipping volume snapshot action.", backupName)
return nil
}
metadata := volume["metadata"].(map[string]interface{})
name := metadata["name"].(string)
volumeID := getVolumeID(volume)
if volumeID == "" {
return fmt.Errorf("unable to determine volume ID for backup %q, PersistentVolume %q", backupName, name)
}
expiration := a.clock.Now().Add(backup.Spec.TTL.Duration)
glog.Infof("Backup %q: snapshotting PersistenVolume %q, volume-id %q, expiration %v", backupName, name, volumeID, expiration)
snapshotID, err := a.snapshotService.CreateSnapshot(volumeID)
if err != nil {
glog.V(4).Infof("error creating snapshot for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err)
return err
}
volumeType, iops, err := a.snapshotService.GetVolumeInfo(volumeID)
if err != nil {
glog.V(4).Infof("error getting volume info for backup %q, volume %q, volume-id %q: %v", backupName, name, volumeID, err)
return err
}
if backup.Status.VolumeBackups == nil {
backup.Status.VolumeBackups = make(map[string]*api.VolumeBackupInfo)
}
backup.Status.VolumeBackups[name] = &api.VolumeBackupInfo{
SnapshotID: snapshotID,
Type: volumeType,
Iops: iops,
}
return nil
}
var ebsVolumeIDRegex = regexp.MustCompile("vol-.*")
// getVolumeID extracts the cloud provider volume/disk identifier from a
// PersistentVolume's spec (AWS EBS, GCE PD, or Azure disk), returning ""
// if one can't be found.
func getVolumeID(pv map[string]interface{}) string {
spec, err := collections.GetMap(pv, "spec")
if err != nil {
return ""
}
if aws, err := collections.GetMap(spec, "awsElasticBlockStore"); err == nil {
volumeID, err := collections.GetString(aws, "volumeID")
if err != nil {
return ""
}
return ebsVolumeIDRegex.FindString(volumeID)
}
if gce, err := collections.GetMap(spec, "gcePersistentDisk"); err == nil {
volumeID, err := collections.GetString(gce, "pdName")
if err != nil {
return ""
}
return volumeID
}
if gce, err := collections.GetMap(spec, "azureDisk"); err == nil {
volumeID, err := collections.GetString(gce, "diskName")
if err != nil {
return ""
}
return volumeID
}
return ""
}
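
For reference (hypothetical inputs), the IDs getVolumeID extracts; the aws:// form matches the dynamically provisioned volume IDs exercised in the tests below:

    // spec.awsElasticBlockStore.volumeID = "vol-abc123"                  -> "vol-abc123"
    // spec.awsElasticBlockStore.volumeID = "aws://us-west-2a/vol-abc123" -> "vol-abc123" (the regex keeps only the vol- portion)
    // spec.gcePersistentDisk.pdName      = "pd-abc123"                   -> "pd-abc123"
    // spec.azureDisk.diskName            = "my-disk"                     -> "my-disk"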


@@ -0,0 +1,205 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"reflect"
"testing"
"time"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
"github.com/heptio/ark/pkg/apis/ark/v1"
. "github.com/heptio/ark/pkg/util/test"
)
func TestVolumeSnapshotAction(t *testing.T) {
iops := 1000
tests := []struct {
name string
snapshotEnabled bool
pv string
ttl time.Duration
expectError bool
expectedVolumeID string
existingVolumeBackups map[string]*v1.VolumeBackupInfo
volumeInfo map[string]v1.VolumeBackupInfo
}{
{
name: "snapshot disabled",
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`,
snapshotEnabled: false,
},
{
name: "can't find volume id - missing spec",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}}`,
expectError: true,
},
{
name: "can't find volume id - spec but no volume source defined",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {}}`,
expectError: true,
},
{
name: "can't find volume id - aws but no volume id",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {}}}`,
expectError: true,
},
{
name: "can't find volume id - gce but no volume id",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {}}}`,
expectError: true,
},
{
name: "aws - simple volume id",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "vol-abc123"}}}`,
expectError: false,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
},
},
{
name: "aws - simple volume id with provisioned IOPS",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "vol-abc123"}}}`,
expectError: false,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": v1.VolumeBackupInfo{Type: "io1", Iops: &iops, SnapshotID: "snap-1"},
},
},
{
name: "aws - dynamically provisioned volume id",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"awsElasticBlockStore": {"volumeID": "aws://us-west-2a/vol-abc123"}}}`,
expectError: false,
expectedVolumeID: "vol-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"vol-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
},
},
{
name: "gce",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
expectError: false,
expectedVolumeID: "pd-abc123",
ttl: 5 * time.Minute,
volumeInfo: map[string]v1.VolumeBackupInfo{
"pd-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
},
},
{
name: "preexisting volume backup info in backup status",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
expectError: false,
expectedVolumeID: "pd-abc123",
ttl: 5 * time.Minute,
existingVolumeBackups: map[string]*v1.VolumeBackupInfo{
"anotherpv": &v1.VolumeBackupInfo{SnapshotID: "anothersnap"},
},
volumeInfo: map[string]v1.VolumeBackupInfo{
"pd-abc123": v1.VolumeBackupInfo{Type: "gp", SnapshotID: "snap-1"},
},
},
{
name: "create snapshot error",
snapshotEnabled: true,
pv: `{"apiVersion": "v1", "kind": "PersistentVolume", "metadata": {"name": "mypv"}, "spec": {"gcePersistentDisk": {"pdName": "pd-abc123"}}}`,
expectError: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
backup := &v1.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: v1.DefaultNamespace,
Name: "mybackup",
},
Spec: v1.BackupSpec{
SnapshotVolumes: test.snapshotEnabled,
TTL: metav1.Duration{Duration: test.ttl},
},
Status: v1.BackupStatus{
VolumeBackups: test.existingVolumeBackups,
},
}
snapshotService := &FakeSnapshotService{SnapshottableVolumes: test.volumeInfo}
action := NewVolumeSnapshotAction(snapshotService).(*volumeSnapshotAction)
fakeClock := clock.NewFakeClock(time.Now())
action.clock = fakeClock
pv, err := getAsMap(test.pv)
if err != nil {
t.Fatal(err)
}
err = action.Execute(pv, backup)
gotErr := err != nil
if e, a := test.expectError, gotErr; e != a {
t.Errorf("error: expected %v, got %v", e, a)
}
if test.expectError {
return
}
if !test.snapshotEnabled {
// don't need to check anything else if snapshots are disabled
return
}
expectedVolumeBackups := test.existingVolumeBackups
if expectedVolumeBackups == nil {
expectedVolumeBackups = make(map[string]*v1.VolumeBackupInfo)
}
// we should have taken exactly one snapshot
require.Equal(t, 1, snapshotService.SnapshotsTaken.Len())
// the snapshotID should be the one in the entry in snapshotService.SnapshottableVolumes
// for the volume we ran the test for
snapshotID, _ := snapshotService.SnapshotsTaken.PopAny()
expectedVolumeBackups["mypv"] = &v1.VolumeBackupInfo{
SnapshotID: snapshotID,
Type: test.volumeInfo[test.expectedVolumeID].Type,
Iops: test.volumeInfo[test.expectedVolumeID].Iops,
}
if e, a := expectedVolumeBackups, backup.Status.VolumeBackups; !reflect.DeepEqual(e, a) {
t.Errorf("backup.status.VolumeBackups: expected %v, got %v", e, a)
}
})
}
}

pkg/buildinfo/version.go (new file, 27 lines)

@@ -0,0 +1,27 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package buildinfo holds build-time information like the Ark version.
// This is a separate package so that other packages can import it without
// worrying about introducing circular dependencies.
package buildinfo
// Version is the current version of Ark, set by the go linker's -X flag at build time.
var Version string
// DockerImage is the full path to the docker image for this build, for example
// gcr.io/heptio-images/ark.
var DockerImage string

pkg/client/client.go (new file, 31 lines)

@@ -0,0 +1,31 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
)
// Config returns a *rest.Config, using either the kubeconfig (if specified) or an in-cluster
// configuration.
func Config(kubeconfig string) (*rest.Config, error) {
if len(kubeconfig) > 0 {
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
return rest.InClusterConfig()
}
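
Usage sketch (not part of this commit; the kubeconfig path is hypothetical): the two ways Config resolves a configuration.

    // Outside a cluster, point at a kubeconfig file:
    cfg, err := client.Config("/home/user/.kube/config")

    // Inside a pod, pass "" to fall back to the in-cluster service account:
    cfg, err = client.Config("")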

pkg/client/dynamic.go (new file, 100 lines)

@@ -0,0 +1,100 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/dynamic"
)
// DynamicFactory contains methods for retrieving dynamic clients for GroupVersionResources and
// GroupVersionKinds.
type DynamicFactory interface {
// ClientForGroupVersionResource returns a Dynamic client for the given Group and Version
// (specified in gvr) and Resource (specified in resource) for the given namespace.
ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error)
// ClientForGroupVersionKind returns a Dynamic client for the given Group and Version
// (specified in gvk) and Resource (specified in resource) for the given namespace.
ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error)
}
// dynamicFactory implements DynamicFactory.
type dynamicFactory struct {
clientPool dynamic.ClientPool
}
var _ DynamicFactory = &dynamicFactory{}
// NewDynamicFactory returns a new ClientPool-based dynamic factory.
func NewDynamicFactory(clientPool dynamic.ClientPool) DynamicFactory {
return &dynamicFactory{clientPool: clientPool}
}
func (f *dynamicFactory) ClientForGroupVersionResource(gvr schema.GroupVersionResource, resource metav1.APIResource, namespace string) (Dynamic, error) {
dynamicClient, err := f.clientPool.ClientForGroupVersionResource(gvr)
if err != nil {
return nil, err
}
return &dynamicResourceClient{
resourceClient: dynamicClient.Resource(&resource, namespace),
}, nil
}
func (f *dynamicFactory) ClientForGroupVersionKind(gvk schema.GroupVersionKind, resource metav1.APIResource, namespace string) (Dynamic, error) {
dynamicClient, err := f.clientPool.ClientForGroupVersionKind(gvk)
if err != nil {
return nil, err
}
return &dynamicResourceClient{
resourceClient: dynamicClient.Resource(&resource, namespace),
}, nil
}
// Dynamic contains client methods that Ark needs for backing up and restoring resources.
type Dynamic interface {
// Create creates an object.
Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error)
// List lists all the objects of a given resource.
List(metav1.ListOptions) (runtime.Object, error)
// Watch watches for changes to objects of a given resource.
Watch(metav1.ListOptions) (watch.Interface, error)
}
// dynamicResourceClient implements Dynamic.
type dynamicResourceClient struct {
resourceClient *dynamic.ResourceClient
}
var _ Dynamic = &dynamicResourceClient{}
func (d *dynamicResourceClient) Create(obj *unstructured.Unstructured) (*unstructured.Unstructured, error) {
return d.resourceClient.Create(obj)
}
func (d *dynamicResourceClient) List(options metav1.ListOptions) (runtime.Object, error) {
return d.resourceClient.List(options)
}
func (d *dynamicResourceClient) Watch(options metav1.ListOptions) (watch.Interface, error) {
return d.resourceClient.Watch(options)
}

pkg/client/factory.go (new file, 72 lines)

@@ -0,0 +1,72 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client
import (
"os"
"github.com/spf13/pflag"
"github.com/heptio/ark/pkg/generated/clientset"
)
// Factory knows how to create an ArkClient.
type Factory interface {
// BindFlags binds common flags such as --kubeconfig to the passed-in FlagSet.
BindFlags(flags *pflag.FlagSet)
// Client returns an ArkClient. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
Client() (clientset.Interface, error)
}
type factory struct {
flags *pflag.FlagSet
kubeconfig string
}
// NewFactory returns a Factory.
func NewFactory() Factory {
f := &factory{
flags: pflag.NewFlagSet("", pflag.ContinueOnError),
}
f.flags.StringVar(&f.kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration")
return f
}
func (f *factory) BindFlags(flags *pflag.FlagSet) {
flags.AddFlagSet(f.flags)
}
func (f *factory) Client() (clientset.Interface, error) {
kubeconfig := f.kubeconfig
if kubeconfig == "" {
// if the command line flag was not specified, try the environment variable
kubeconfig = os.Getenv("KUBECONFIG")
}
clientConfig, err := Config(kubeconfig)
if err != nil {
return nil, err
}
arkClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
return arkClient, nil
}
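
Usage sketch (not part of this commit): wiring the Factory into a CLI entry point via pflag.

    f := client.NewFactory()
    f.BindFlags(pflag.CommandLine)
    pflag.Parse()

    arkClient, err := f.Client()
    if err != nil {
        // no usable configuration: bad --kubeconfig, bad $KUBECONFIG,
        // and not running in-cluster
        return err
    }
    _ = arkClient // use the clientset from here on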


@@ -0,0 +1,164 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"fmt"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/heptio/ark/pkg/cloudprovider"
)
var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
type blockStorageAdapter struct {
ec2 *ec2.EC2
az string
}
func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType string, iops *int) (volumeID string, err error) {
req := &ec2.CreateVolumeInput{
SnapshotId: &snapshotID,
AvailabilityZone: &op.az,
VolumeType: &volumeType,
}
if iops != nil {
req.SetIops(int64(*iops))
}
res, err := op.ec2.CreateVolume(req)
if err != nil {
return "", err
}
return *res.VolumeId, nil
}
func (op *blockStorageAdapter) GetVolumeInfo(volumeID string) (string, *int, error) {
req := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
res, err := op.ec2.DescribeVolumes(req)
if err != nil {
return "", nil, err
}
if len(res.Volumes) != 1 {
return "", nil, fmt.Errorf("Expected one volume from DescribeVolumes for volume ID %v, got %v", volumeID, len(res.Volumes))
}
vol := res.Volumes[0]
var (
volumeType string
iops int
)
if vol.VolumeType != nil {
volumeType = *vol.VolumeType
}
if vol.Iops != nil {
iops = int(*vol.Iops)
}
return volumeType, &iops, nil
}
func (op *blockStorageAdapter) IsVolumeReady(volumeID string) (ready bool, err error) {
req := &ec2.DescribeVolumesInput{
VolumeIds: []*string{&volumeID},
}
res, err := op.ec2.DescribeVolumes(req)
if err != nil {
return false, err
}
if len(res.Volumes) != 1 {
return false, fmt.Errorf("Expected one volume from DescribeVolumes for volume ID %v, got %v", volumeID, len(res.Volumes))
}
return *res.Volumes[0].State == ec2.VolumeStateAvailable, nil
}
func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
req := &ec2.DescribeSnapshotsInput{}
for k, v := range tagFilters {
filter := &ec2.Filter{}
filter.SetName(k)
filter.SetValues([]*string{&v})
req.Filters = append(req.Filters, filter)
}
res, err := op.ec2.DescribeSnapshots(req)
if err != nil {
return nil, err
}
var ret []string
for _, snapshot := range res.Snapshots {
ret = append(ret, *snapshot.SnapshotId)
}
return ret, nil
}
func (op *blockStorageAdapter) CreateSnapshot(volumeID string, tags map[string]string) (string, error) {
req := &ec2.CreateSnapshotInput{
VolumeId: &volumeID,
}
res, err := op.ec2.CreateSnapshot(req)
if err != nil {
return "", err
}
tagsReq := &ec2.CreateTagsInput{}
tagsReq.SetResources([]*string{res.SnapshotId})
ec2Tags := make([]*ec2.Tag, 0, len(tags))
for k, v := range tags {
key := k
val := v
tag := &ec2.Tag{Key: &key, Value: &val}
ec2Tags = append(ec2Tags, tag)
}
tagsReq.SetTags(ec2Tags)
_, err = op.ec2.CreateTags(tagsReq)
return *res.SnapshotId, err
}
func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
req := &ec2.DeleteSnapshotInput{
SnapshotId: &snapshotID,
}
_, err := op.ec2.DeleteSnapshot(req)
return err
}


@@ -0,0 +1,88 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"io"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/heptio/ark/pkg/cloudprovider"
)
var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}
type objectStorageAdapter struct {
s3 *s3.S3
}
func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
req := &s3.PutObjectInput{
Bucket: &bucket,
Key: &key,
Body: body,
}
_, err := op.s3.PutObject(req)
return err
}
func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
req := &s3.GetObjectInput{
Bucket: &bucket,
Key: &key,
}
res, err := op.s3.GetObject(req)
if err != nil {
return nil, err
}
return res.Body, nil
}
func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
req := &s3.ListObjectsV2Input{
Bucket: &bucket,
Delimiter: &delimiter,
}
res, err := op.s3.ListObjectsV2(req)
if err != nil {
return nil, err
}
ret := make([]string, 0, len(res.CommonPrefixes))
for _, prefix := range res.CommonPrefixes {
ret = append(ret, *prefix.Prefix)
}
return ret, nil
}
func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
req := &s3.DeleteObjectInput{
Bucket: &bucket,
Key: &key,
}
_, err := op.s3.DeleteObject(req)
return err
}


@@ -0,0 +1,62 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package aws
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/ec2"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/heptio/ark/pkg/cloudprovider"
)
type storageAdapter struct {
blockStorage *blockStorageAdapter
objectStorage *objectStorageAdapter
}
var _ cloudprovider.StorageAdapter = &storageAdapter{}
// NewStorageAdapter creates an AWS cloudprovider.StorageAdapter from the given
// aws.Config and availability zone.
func NewStorageAdapter(config *aws.Config, availabilityZone string) (cloudprovider.StorageAdapter, error) {
sess, err := session.NewSession(config)
if err != nil {
return nil, err
}
if _, err := sess.Config.Credentials.Get(); err != nil {
return nil, err
}
return &storageAdapter{
blockStorage: &blockStorageAdapter{
ec2: ec2.New(sess),
az: availabilityZone,
},
objectStorage: &objectStorageAdapter{
s3: s3.New(sess),
},
}, nil
}
func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
return op.objectStorage
}
func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
return op.blockStorage
}


@@ -0,0 +1,187 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"context"
"errors"
"fmt"
"time"
azure "github.com/Azure/azure-sdk-for-go/arm/disk"
"github.com/satori/uuid"
"github.com/heptio/ark/pkg/cloudprovider"
)
type blockStorageAdapter struct {
disks *azure.DisksClient
snaps *azure.SnapshotsClient
subscription string
resourceGroup string
location string
apiTimeout time.Duration
}
var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID, volumeType string, iops *int) (string, error) {
fullSnapshotName := getFullSnapshotName(op.subscription, op.resourceGroup, snapshotID)
diskName := "restore-" + uuid.NewV4().String()
disk := azure.Model{
Name: &diskName,
Location: &op.location,
Properties: &azure.Properties{
CreationData: &azure.CreationData{
CreateOption: azure.Copy,
SourceResourceID: &fullSnapshotName,
},
AccountType: azure.StorageAccountTypes(volumeType),
},
}
ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
defer cancel()
_, errChan := op.disks.CreateOrUpdate(op.resourceGroup, *disk.Name, disk, ctx.Done())
err := <-errChan
if err != nil {
return "", err
}
return diskName, nil
}
func (op *blockStorageAdapter) GetVolumeInfo(volumeID string) (string, *int, error) {
res, err := op.disks.Get(op.resourceGroup, volumeID)
if err != nil {
return "", nil, err
}
return string(res.AccountType), nil, nil
}
func (op *blockStorageAdapter) IsVolumeReady(volumeID string) (ready bool, err error) {
res, err := op.disks.Get(op.resourceGroup, volumeID)
if err != nil {
return false, err
}
if res.ProvisioningState == nil {
return false, errors.New("nil ProvisioningState returned from Get call")
}
return *res.ProvisioningState == "Succeeded", nil
}
func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
res, err := op.snaps.ListByResourceGroup(op.resourceGroup)
if err != nil {
return nil, err
}
if res.Value == nil {
return nil, errors.New("nil Value returned from ListByResourceGroup call")
}
ret := make([]string, 0, len(*res.Value))
Snapshot:
for _, snap := range *res.Value {
if snap.Tags == nil && len(tagFilters) > 0 {
continue
}
if snap.ID == nil {
continue
}
// Azure doesn't offer tag-filtering through the API, so we have to filter
// the results manually. Require all filter keys to be present, with matching values.
for filterKey, filterVal := range tagFilters {
if val, ok := (*snap.Tags)[filterKey]; !ok || val == nil || *val != filterVal {
continue Snapshot
}
}
ret = append(ret, *snap.Name)
}
return ret, nil
}
func (op *blockStorageAdapter) CreateSnapshot(volumeID string, tags map[string]string) (string, error) {
fullDiskName := getFullDiskName(op.subscription, op.resourceGroup, volumeID)
// snapshot names must be <= 80 characters long
var snapshotName string
suffix := "-" + uuid.NewV4().String()
if len(volumeID) <= (80 - len(suffix)) {
snapshotName = volumeID + suffix
} else {
snapshotName = volumeID[0:80-len(suffix)] + suffix
}
snap := azure.Snapshot{
Name: &snapshotName,
Properties: &azure.Properties{
CreationData: &azure.CreationData{
CreateOption: azure.Copy,
SourceResourceID: &fullDiskName,
},
},
Tags: &map[string]*string{},
Location: &op.location,
}
for k, v := range tags {
val := v
(*snap.Tags)[k] = &val
}
ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
defer cancel()
_, errChan := op.snaps.CreateOrUpdate(op.resourceGroup, *snap.Name, snap, ctx.Done())
err := <-errChan
if err != nil {
return "", err
}
return snapshotName, nil
}
func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
ctx, cancel := context.WithTimeout(context.Background(), op.apiTimeout)
defer cancel()
_, errChan := op.snaps.Delete(op.resourceGroup, snapshotID, ctx.Done())
err := <-errChan
return err
}
func getFullDiskName(subscription string, resourceGroup string, diskName string) string {
return fmt.Sprintf("/subscriptions/%v/resourceGroups/%v/providers/Microsoft.Compute/disks/%v", subscription, resourceGroup, diskName)
}
func getFullSnapshotName(subscription string, resourceGroup string, snapshotName string) string {
return fmt.Sprintf("/subscriptions/%v/resourceGroups/%v/providers/Microsoft.Compute/snapshots/%v", subscription, resourceGroup, snapshotName)
}
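// exampleFullSnapshotName is an illustrative sketch, not part of the original commit; with these
// hypothetical inputs it renders the ARM resource ID
// "/subscriptions/SUB/resourceGroups/RG/providers/Microsoft.Compute/snapshots/snap-1".
func exampleFullSnapshotName() string {
return getFullSnapshotName("SUB", "RG", "snap-1")
}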

View File

@@ -0,0 +1,138 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"io"
"strings"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/heptio/ark/pkg/cloudprovider"
)
// ref. https://github.com/Azure-Samples/storage-blob-go-getting-started/blob/master/storageExample.go
type objectStorageAdapter struct {
blobClient *storage.BlobStorageClient
}
var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}
func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
container, err := getContainerReference(op.blobClient, bucket)
if err != nil {
return err
}
blob, err := getBlobReference(container, key)
if err != nil {
return err
}
// TODO: having to seek to the end and back to the beginning to determine
// the content length is ugly; refactor to make this better.
length, err := body.Seek(0, io.SeekEnd)
if err != nil {
return err
}
blob.Properties.ContentLength = length
if _, err := body.Seek(0, io.SeekStart); err != nil {
return err
}
return blob.CreateBlockBlobFromReader(body, nil)
}
func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
container, err := getContainerReference(op.blobClient, bucket)
if err != nil {
return nil, err
}
blob, err := getBlobReference(container, key)
if err != nil {
return nil, err
}
res, err := blob.Get(nil)
if err != nil {
return nil, err
}
return res, nil
}
func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
container, err := getContainerReference(op.blobClient, bucket)
if err != nil {
return nil, err
}
params := storage.ListBlobsParameters{
Delimiter: delimiter,
}
res, err := container.ListBlobs(params)
if err != nil {
return nil, err
}
// Azure returns prefixes inclusive of the last delimiter. We need to strip
// it.
ret := make([]string, 0, len(res.BlobPrefixes))
for _, prefix := range res.BlobPrefixes {
ret = append(ret, prefix[0:strings.LastIndex(prefix, delimiter)])
}
return ret, nil
}
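// exampleStripDelimiter is an illustrative sketch, not part of the original commit: with a
// delimiter of "/", Azure returns a prefix such as "backup-1/", and the stripping above reduces
// it to "backup-1".
func exampleStripDelimiter() string {
prefix, delimiter := "backup-1/", "/"
return prefix[0:strings.LastIndex(prefix, delimiter)]
}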
func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
container, err := getContainerReference(op.blobClient, bucket)
if err != nil {
return err
}
blob, err := getBlobReference(container, key)
if err != nil {
return err
}
return blob.Delete(nil)
}
func getContainerReference(blobClient *storage.BlobStorageClient, bucket string) (*storage.Container, error) {
container := blobClient.GetContainerReference(bucket)
if container == nil {
return nil, fmt.Errorf("unable to get container reference for bucket %v", bucket)
}
return container, nil
}
func getBlobReference(container *storage.Container, key string) (*storage.Blob, error) {
blob := container.GetBlobReference(key)
if blob == nil {
return nil, fmt.Errorf("unable to get blob reference for key %v", key)
}
return blob, nil
}

View File

@@ -0,0 +1,103 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package azure
import (
"fmt"
"os"
"time"
"github.com/Azure/azure-sdk-for-go/arm/disk"
"github.com/Azure/azure-sdk-for-go/arm/examples/helpers"
"github.com/Azure/azure-sdk-for-go/storage"
"github.com/Azure/go-autorest/autorest/azure"
"github.com/heptio/ark/pkg/cloudprovider"
)
const (
azureClientIDKey string = "AZURE_CLIENT_ID"
azureClientSecretKey string = "AZURE_CLIENT_SECRET"
azureSubscriptionIDKey string = "AZURE_SUBSCRIPTION_ID"
azureTenantIDKey string = "AZURE_TENANT_ID"
azureStorageAccountIDKey string = "AZURE_STORAGE_ACCOUNT_ID"
azureStorageKeyKey string = "AZURE_STORAGE_KEY"
azureResourceGroupKey string = "AZURE_RESOURCE_GROUP"
)
type storageAdapter struct {
objectStorage *objectStorageAdapter
blockStorage *blockStorageAdapter
}
var _ cloudprovider.StorageAdapter = &storageAdapter{}
func NewStorageAdapter(location string, apiTimeout time.Duration) (cloudprovider.StorageAdapter, error) {
cfg := map[string]string{
azureClientIDKey: "",
azureClientSecretKey: "",
azureSubscriptionIDKey: "",
azureTenantIDKey: "",
azureStorageAccountIDKey: "",
azureStorageKeyKey: "",
azureResourceGroupKey: "",
}
for key := range cfg {
cfg[key] = os.Getenv(key)
}
spt, err := helpers.NewServicePrincipalTokenFromCredentials(cfg, azure.PublicCloud.ResourceManagerEndpoint)
if err != nil {
return nil, fmt.Errorf("error creating new service principal: %v", err)
}
disksClient := disk.NewDisksClient(cfg[azureSubscriptionIDKey])
snapsClient := disk.NewSnapshotsClient(cfg[azureSubscriptionIDKey])
disksClient.Authorizer = spt
snapsClient.Authorizer = spt
storageClient, err := storage.NewBasicClient(cfg[azureStorageAccountIDKey], cfg[azureStorageKeyKey])
if err != nil {
return nil, err
}
blobClient := storageClient.GetBlobService()
if apiTimeout == 0 {
apiTimeout = time.Minute
}
return &storageAdapter{
objectStorage: &objectStorageAdapter{
blobClient: &blobClient,
},
blockStorage: &blockStorageAdapter{
disks: &disksClient,
snaps: &snapsClient,
subscription: cfg[azureSubscriptionIDKey],
resourceGroup: cfg[azureResourceGroupKey],
location: location,
apiTimeout: apiTimeout,
},
}, nil
}
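// exampleNewAzureAdapter is an illustrative sketch, not part of the original commit; it assumes
// the AZURE_* environment variables listed above are already set, and passes a hypothetical
// location and timeout.
func exampleNewAzureAdapter() (cloudprovider.StorageAdapter, error) {
return NewStorageAdapter("eastus", 2*time.Minute)
}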
func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
return op.objectStorage
}
func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
return op.blockStorage
}

View File

@@ -0,0 +1,92 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"context"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
// backupCacheBucket holds the backups and error from a GetAllBackups call.
type backupCacheBucket struct {
backups []*v1.Backup
error error
}
// backupCache caches GetAllBackups calls, refreshing them periodically.
type backupCache struct {
delegate BackupGetter
lock sync.RWMutex
// This doesn't really need to be a map right now, but if we ever move to supporting multiple
// buckets, this will be ready for it.
buckets map[string]*backupCacheBucket
}
var _ BackupGetter = &backupCache{}
// NewBackupCache returns a new backup cache that refreshes from delegate every resyncPeriod.
func NewBackupCache(ctx context.Context, delegate BackupGetter, resyncPeriod time.Duration) BackupGetter {
c := &backupCache{
delegate: delegate,
buckets: make(map[string]*backupCacheBucket),
}
// Start the goroutine to refresh all buckets every resyncPeriod. It stops
// when ctx.Done() is closed.
go wait.Until(c.refresh, resyncPeriod, ctx.Done())
return c
}
// refresh refreshes all the buckets currently in the cache by doing a live lookup via c.delegate.
func (c *backupCache) refresh() {
c.lock.Lock()
defer c.lock.Unlock()
glog.V(4).Infof("refreshing all cached backup lists from object storage")
for bucketName, bucket := range c.buckets {
glog.V(4).Infof("refreshing bucket %q", bucketName)
bucket.backups, bucket.error = c.delegate.GetAllBackups(bucketName)
}
}
func (c *backupCache) GetAllBackups(bucketName string) ([]*v1.Backup, error) {
c.lock.RLock()
bucket, found := c.buckets[bucketName]
c.lock.RUnlock()
if found {
glog.V(4).Infof("returning cached backup list for bucket %q", bucketName)
return bucket.backups, bucket.error
}
glog.V(4).Infof("bucket %q is not in cache - doing a live lookup", bucketName)
backups, err := c.delegate.GetAllBackups(bucketName)
c.lock.Lock()
c.buckets[bucketName] = &backupCacheBucket{backups: backups, error: err}
c.lock.Unlock()
return backups, err
}
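// exampleCachedLookups is an illustrative sketch, not part of the original commit; the bucket name
// is hypothetical. The first GetAllBackups for a bucket is a live lookup through the delegate;
// subsequent calls are served from the cache, which refreshes every minute until ctx is canceled.
func exampleCachedLookups(ctx context.Context, delegate BackupGetter) ([]*v1.Backup, error) {
cache := NewBackupCache(ctx, delegate, time.Minute)
return cache.GetAllBackups("my-bucket")
}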

View File

@@ -0,0 +1,160 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"context"
"errors"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/test"
)
func TestNewBackupCache(t *testing.T) {
delegate := &test.FakeBackupService{}
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
c := NewBackupCache(ctx, delegate, 100*time.Millisecond)
// nothing in cache, live lookup
bucket1 := []*v1.Backup{
test.NewTestBackup().WithName("backup1").Backup,
test.NewTestBackup().WithName("backup2").Backup,
}
delegate.On("GetAllBackups", "bucket1").Return(bucket1, nil).Once()
// should be updated via refresh
updatedBucket1 := []*v1.Backup{
test.NewTestBackup().WithName("backup2").Backup,
}
delegate.On("GetAllBackups", "bucket1").Return(updatedBucket1, nil)
// nothing in cache, live lookup
bucket2 := []*v1.Backup{
test.NewTestBackup().WithName("backup5").Backup,
test.NewTestBackup().WithName("backup6").Backup,
}
delegate.On("GetAllBackups", "bucket2").Return(bucket2, nil).Once()
// should be updated via refresh
updatedBucket2 := []*v1.Backup{
test.NewTestBackup().WithName("backup7").Backup,
}
delegate.On("GetAllBackups", "bucket2").Return(updatedBucket2, nil)
backups, err := c.GetAllBackups("bucket1")
assert.Equal(t, bucket1, backups)
assert.NoError(t, err)
backups, err = c.GetAllBackups("bucket2")
assert.Equal(t, bucket2, backups)
assert.NoError(t, err)
var done1, done2 bool
for {
select {
case <-ctx.Done():
t.Fatal("timed out")
default:
if done1 && done2 {
return
}
}
backups, err = c.GetAllBackups("bucket1")
if len(backups) == 1 {
if assert.Equal(t, updatedBucket1[0], backups[0]) {
done1 = true
}
}
backups, err = c.GetAllBackups("bucket2")
if len(backups) == 1 {
if assert.Equal(t, updatedBucket2[0], backups[0]) {
done2 = true
}
}
time.Sleep(100 * time.Millisecond)
}
}
func TestBackupCacheRefresh(t *testing.T) {
delegate := &test.FakeBackupService{}
c := &backupCache{
delegate: delegate,
buckets: map[string]*backupCacheBucket{
"bucket1": {},
"bucket2": {},
},
}
bucket1 := []*v1.Backup{
test.NewTestBackup().WithName("backup1").Backup,
test.NewTestBackup().WithName("backup2").Backup,
}
delegate.On("GetAllBackups", "bucket1").Return(bucket1, nil)
delegate.On("GetAllBackups", "bucket2").Return(nil, errors.New("bad"))
c.refresh()
assert.Equal(t, bucket1, c.buckets["bucket1"].backups)
assert.NoError(t, c.buckets["bucket1"].error)
assert.Empty(t, c.buckets["bucket2"].backups)
assert.EqualError(t, c.buckets["bucket2"].error, "bad")
}
func TestBackupCacheGetAllBackupsUsesCacheIfPresent(t *testing.T) {
delegate := &test.FakeBackupService{}
bucket1 := []*v1.Backup{
test.NewTestBackup().WithName("backup1").Backup,
test.NewTestBackup().WithName("backup2").Backup,
}
c := &backupCache{
delegate: delegate,
buckets: map[string]*backupCacheBucket{
"bucket1": {
backups: bucket1,
},
},
}
bucket2 := []*v1.Backup{
test.NewTestBackup().WithName("backup3").Backup,
test.NewTestBackup().WithName("backup4").Backup,
}
delegate.On("GetAllBackups", "bucket2").Return(bucket2, nil)
backups, err := c.GetAllBackups("bucket1")
assert.Equal(t, bucket1, backups)
assert.NoError(t, err)
backups, err = c.GetAllBackups("bucket2")
assert.Equal(t, bucket2, backups)
assert.NoError(t, err)
}

View File

@@ -0,0 +1,184 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"context"
"fmt"
"io"
"io/ioutil"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/errors"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
)
// BackupService contains methods for working with backups in object storage.
type BackupService interface {
BackupGetter
// UploadBackup uploads the specified Ark backup, consisting of a metadata file and a tarball of
// Kubernetes API object manifests, into the given bucket in object storage. Returns an error if a
// problem is encountered reading the files or performing the upload via the cloud API.
UploadBackup(bucket, name string, metadata, backup io.ReadSeeker) error
// DownloadBackup downloads the Ark backup with the specified name from object storage via the cloud
// API. It returns a reader for the backup's gzipped tarball contents, or an error if a problem is
// encountered downloading or reading the file from the cloud API.
DownloadBackup(bucket, name string) (io.ReadCloser, error)
// DeleteBackup deletes the backup content (both metadata and data) with the given name from the
// given bucket in object storage.
DeleteBackup(bucket, backupName string) error
}
// BackupGetter knows how to list backups in object storage.
type BackupGetter interface {
// GetAllBackups lists all the api.Backups in object storage for the given bucket.
GetAllBackups(bucket string) ([]*api.Backup, error)
}
const (
metadataFileFormatString string = "%s/ark-backup.json"
backupFileFormatString string = "%s/%s.tar.gz"
)
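// exampleBackupKeys is an illustrative sketch, not part of the original commit: for a hypothetical
// backup named "nginx", the format strings above yield "nginx/ark-backup.json" for the metadata
// file and "nginx/nginx.tar.gz" for the backup data.
func exampleBackupKeys() (metadataKey, backupKey string) {
return fmt.Sprintf(metadataFileFormatString, "nginx"),
fmt.Sprintf(backupFileFormatString, "nginx", "nginx")
}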
type backupService struct {
objectStorage ObjectStorageAdapter
}
var _ BackupService = &backupService{}
var _ BackupGetter = &backupService{}
// NewBackupService creates a backup service using the provided object storage adapter
func NewBackupService(objectStorage ObjectStorageAdapter) BackupService {
return &backupService{
objectStorage: objectStorage,
}
}
func (br *backupService) UploadBackup(bucket, backupName string, metadata, backup io.ReadSeeker) error {
// upload metadata file
metadataKey := fmt.Sprintf(metadataFileFormatString, backupName)
if err := br.objectStorage.PutObject(bucket, metadataKey, metadata); err != nil {
return err
}
// upload tar file
if err := br.objectStorage.PutObject(bucket, fmt.Sprintf(backupFileFormatString, backupName, backupName), backup); err != nil {
// try to delete the metadata file since the data upload failed
deleteErr := br.objectStorage.DeleteObject(bucket, metadataKey)
return errors.NewAggregate([]error{err, deleteErr})
}
return nil
}
func (br *backupService) DownloadBackup(bucket, backupName string) (io.ReadCloser, error) {
return br.objectStorage.GetObject(bucket, fmt.Sprintf(backupFileFormatString, backupName, backupName))
}
func (br *backupService) GetAllBackups(bucket string) ([]*api.Backup, error) {
prefixes, err := br.objectStorage.ListCommonPrefixes(bucket, "/")
if err != nil {
return nil, err
}
if len(prefixes) == 0 {
return []*api.Backup{}, nil
}
output := make([]*api.Backup, 0, len(prefixes))
decoder := scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion)
for _, backupDir := range prefixes {
err := func() error {
key := fmt.Sprintf(metadataFileFormatString, backupDir)
res, err := br.objectStorage.GetObject(bucket, key)
if err != nil {
return err
}
defer res.Close()
data, err := ioutil.ReadAll(res)
if err != nil {
return err
}
obj, _, err := decoder.Decode(data, nil, nil)
if err != nil {
return err
}
backup, ok := obj.(*api.Backup)
if !ok {
return fmt.Errorf("unexpected type for %s/%s: %T", bucket, key, obj)
}
output = append(output, backup)
return nil
}()
if err != nil {
return nil, err
}
}
return output, nil
}
func (br *backupService) DeleteBackup(bucket, backupName string) error {
var errs []error
key := fmt.Sprintf(backupFileFormatString, backupName, backupName)
glog.V(4).Infof("Trying to delete bucket=%s, key=%s", bucket, key)
if err := br.objectStorage.DeleteObject(bucket, key); err != nil {
errs = append(errs, err)
}
key = fmt.Sprintf(metadataFileFormatString, backupName)
glog.V(4).Infof("Trying to delete bucket=%s, key=%s", bucket, key)
if err := br.objectStorage.DeleteObject(bucket, key); err != nil {
errs = append(errs, err)
}
return errors.NewAggregate(errs)
}
// cachedBackupService wraps a real backup service with a cache for getting cloud backups.
type cachedBackupService struct {
BackupService
cache BackupGetter
}
// NewBackupServiceWithCachedBackupGetter returns a BackupService that uses a cache for
// GetAllBackups().
func NewBackupServiceWithCachedBackupGetter(ctx context.Context, delegate BackupService, resyncPeriod time.Duration) BackupService {
return &cachedBackupService{
BackupService: delegate,
cache: NewBackupCache(ctx, delegate, resyncPeriod),
}
}
func (c *cachedBackupService) GetAllBackups(bucketName string) ([]*api.Backup, error) {
return c.cache.GetAllBackups(bucketName)
}

View File

@@ -0,0 +1,407 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"bytes"
"encoding/json"
"errors"
"io"
"io/ioutil"
"strings"
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/sets"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/util/encode"
)
func TestUploadBackup(t *testing.T) {
tests := []struct {
name string
bucket string
bucketExists bool
backupName string
metadata io.ReadSeeker
backup io.ReadSeeker
objectStoreErrs map[string]map[string]interface{}
expectedErr bool
expectedRes map[string][]byte
}{
{
name: "normal case",
bucket: "test-bucket",
bucketExists: true,
backupName: "test-backup",
metadata: newStringReadSeeker("foo"),
backup: newStringReadSeeker("bar"),
expectedErr: false,
expectedRes: map[string][]byte{
"test-backup/ark-backup.json": []byte("foo"),
"test-backup/test-backup.tar.gz": []byte("bar"),
},
},
{
name: "no such bucket causes error",
bucket: "test-bucket",
bucketExists: false,
backupName: "test-backup",
expectedErr: true,
},
{
name: "error on metadata upload does not upload data",
bucket: "test-bucket",
bucketExists: true,
backupName: "test-backup",
metadata: newStringReadSeeker("foo"),
backup: newStringReadSeeker("bar"),
objectStoreErrs: map[string]map[string]interface{}{
"putobject": map[string]interface{}{
"test-bucket||test-backup/ark-backup.json": true,
},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
{
name: "error on data upload deletes metadata",
bucket: "test-bucket",
bucketExists: true,
backupName: "test-backup",
metadata: newStringReadSeeker("foo"),
backup: newStringReadSeeker("bar"),
objectStoreErrs: map[string]map[string]interface{}{
"putobject": map[string]interface{}{
"test-bucket||test-backup/test-backup.tar.gz": true,
},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objStore := &fakeObjectStorage{
returnErrors: test.objectStoreErrs,
storage: make(map[string]map[string][]byte),
}
if test.bucketExists {
objStore.storage[test.bucket] = make(map[string][]byte)
}
backupService := NewBackupService(objStore)
err := backupService.UploadBackup(test.bucket, test.backupName, test.metadata, test.backup)
assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)
assert.Equal(t, test.expectedRes, objStore.storage[test.bucket])
})
}
}
func TestDownloadBackup(t *testing.T) {
tests := []struct {
name string
bucket string
backupName string
storage map[string]map[string][]byte
expectedErr bool
expectedRes []byte
}{
{
name: "normal case",
bucket: "test-bucket",
backupName: "test-backup",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"test-backup/test-backup.tar.gz": []byte("foo"),
},
},
expectedErr: false,
expectedRes: []byte("foo"),
},
{
name: "no such bucket causes error",
bucket: "test-bucket",
backupName: "test-backup",
storage: map[string]map[string][]byte{},
expectedErr: true,
},
{
name: "no such key causes error",
bucket: "test-bucket",
backupName: "test-backup",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{},
},
expectedErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objStore := &fakeObjectStorage{storage: test.storage}
backupService := NewBackupService(objStore)
rdr, err := backupService.DownloadBackup(test.bucket, test.backupName)
assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)
if err == nil {
res, err := ioutil.ReadAll(rdr)
assert.Nil(t, err)
assert.Equal(t, test.expectedRes, res)
}
})
}
}
func TestDeleteBackup(t *testing.T) {
tests := []struct {
name string
bucket string
backupName string
storage map[string]map[string][]byte
expectedErr bool
expectedRes map[string][]byte
}{
{
name: "normal case",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/bak.tar.gz": nil,
"bak/ark-backup.json": nil,
},
},
expectedErr: false,
expectedRes: make(map[string][]byte),
},
{
name: "failed delete of backup doesn't prevent metadata delete but returns error",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/ark-backup.json": nil,
},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
{
name: "failed delete of metadata returns error",
bucket: "test-bucket",
backupName: "bak",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"bak/bak.tar.gz": nil,
},
},
expectedErr: true,
expectedRes: make(map[string][]byte),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objStore := &fakeObjectStorage{storage: test.storage}
backupService := NewBackupService(objStore)
res := backupService.DeleteBackup(test.bucket, test.backupName)
assert.Equal(t, test.expectedErr, res != nil, "got error %v", res)
assert.Equal(t, test.expectedRes, objStore.storage[test.bucket])
})
}
}
func TestGetAllBackups(t *testing.T) {
tests := []struct {
name string
bucket string
storage map[string]map[string][]byte
expectedRes []*api.Backup
expectedErr bool
}{
{
name: "normal case",
bucket: "test-bucket",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backup-2/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-2"}}),
},
},
expectedErr: false,
expectedRes: []*api.Backup{
&api.Backup{
TypeMeta: metav1.TypeMeta{Kind: "Backup", APIVersion: "ark.heptio.com/v1"},
ObjectMeta: metav1.ObjectMeta{Name: "backup-1"},
},
&api.Backup{
TypeMeta: metav1.TypeMeta{Kind: "Backup", APIVersion: "ark.heptio.com/v1"},
ObjectMeta: metav1.ObjectMeta{Name: "backup-2"},
},
},
},
{
name: "decode error returns nil/error",
bucket: "test-bucket",
storage: map[string]map[string][]byte{
"test-bucket": map[string][]byte{
"backup-1/ark-backup.json": encodeToBytes(&api.Backup{ObjectMeta: metav1.ObjectMeta{Name: "backup-1"}}),
"backup-2/ark-backup.json": []byte("this is not valid backup JSON"),
},
},
expectedErr: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
objStore := &fakeObjectStorage{storage: test.storage}
backupService := NewBackupService(objStore)
res, err := backupService.GetAllBackups(test.bucket)
assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)
assert.Equal(t, test.expectedRes, res)
})
}
}
func jsonMarshal(obj interface{}) []byte {
res, err := json.Marshal(obj)
if err != nil {
panic(err)
}
return res
}
func encodeToBytes(obj runtime.Object) []byte {
res, err := encode.Encode(obj, "json")
if err != nil {
panic(err)
}
return res
}
type stringReadSeeker struct {
*strings.Reader
}
func newStringReadSeeker(s string) *stringReadSeeker {
return &stringReadSeeker{
Reader: strings.NewReader(s),
}
}
// Seek overrides the embedded strings.Reader's Seek and intentionally panics:
// the fakes in this file only read the body, so any seek indicates a test bug.
func (srs *stringReadSeeker) Seek(offset int64, whence int) (int64, error) {
panic("not implemented")
}
type fakeObjectStorage struct {
storage map[string]map[string][]byte
returnErrors map[string]map[string]interface{}
}
func (os *fakeObjectStorage) PutObject(bucket string, key string, body io.ReadSeeker) error {
if os.returnErrors["putobject"] != nil && os.returnErrors["putobject"][bucket+"||"+key] != nil {
return errors.New("error")
}
if os.storage[bucket] == nil {
return errors.New("bucket not found")
}
data, err := ioutil.ReadAll(body)
if err != nil {
return err
}
os.storage[bucket][key] = data
return nil
}
func (os *fakeObjectStorage) GetObject(bucket string, key string) (io.ReadCloser, error) {
if os.storage == nil {
return nil, errors.New("storage not initialized")
}
if os.storage[bucket] == nil {
return nil, errors.New("bucket not found")
}
if os.storage[bucket][key] == nil {
return nil, errors.New("key not found")
}
return ioutil.NopCloser(bytes.NewReader(os.storage[bucket][key])), nil
}
func (os *fakeObjectStorage) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
if os.storage == nil {
return nil, errors.New("storage not initialized")
}
if os.storage[bucket] == nil {
return nil, errors.New("bucket not found")
}
prefixes := sets.NewString()
for key := range os.storage[bucket] {
delimIdx := strings.LastIndex(key, delimiter)
if delimIdx == -1 {
prefixes.Insert(key)
continue
}
prefixes.Insert(key[0:delimIdx])
}
return prefixes.List(), nil
}
func (os *fakeObjectStorage) DeleteObject(bucket string, key string) error {
if os.storage == nil {
return errors.New("storage not initialized")
}
if os.storage[bucket] == nil {
return errors.New("bucket not found")
}
if _, exists := os.storage[bucket][key]; !exists {
return errors.New("key not found")
}
delete(os.storage[bucket], key)
return nil
}

View File

@@ -0,0 +1,154 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"strings"
"time"
uuid "github.com/satori/go.uuid"
"google.golang.org/api/compute/v0.beta"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/heptio/ark/pkg/cloudprovider"
)
type blockStorageAdapter struct {
gce *compute.Service
project string
zone string
}
var _ cloudprovider.BlockStorageAdapter = &blockStorageAdapter{}
func (op *blockStorageAdapter) CreateVolumeFromSnapshot(snapshotID string, volumeType string, iops *int) (volumeID string, err error) {
res, err := op.gce.Snapshots.Get(op.project, snapshotID).Do()
if err != nil {
return "", err
}
disk := &compute.Disk{
Name: "restore-" + uuid.NewV4().String(),
SourceSnapshot: res.SelfLink,
Type: volumeType,
}
if _, err = op.gce.Disks.Insert(op.project, op.zone, disk).Do(); err != nil {
return "", err
}
return disk.Name, nil
}
func (op *blockStorageAdapter) GetVolumeInfo(volumeID string) (string, *int, error) {
res, err := op.gce.Disks.Get(op.project, op.zone, volumeID).Do()
if err != nil {
return "", nil, err
}
return res.Type, nil, nil
}
func (op *blockStorageAdapter) IsVolumeReady(volumeID string) (ready bool, err error) {
disk, err := op.gce.Disks.Get(op.project, op.zone, volumeID).Do()
if err != nil {
return false, err
}
// TODO can we consider a disk ready while it's in the RESTORING state?
return disk.Status == "READY", nil
}
func (op *blockStorageAdapter) ListSnapshots(tagFilters map[string]string) ([]string, error) {
useParentheses := len(tagFilters) > 1
subFilters := make([]string, 0, len(tagFilters))
for k, v := range tagFilters {
fs := k + " eq " + v
if useParentheses {
fs = "(" + fs + ")"
}
subFilters = append(subFilters, fs)
}
filter := strings.Join(subFilters, " ")
res, err := op.gce.Snapshots.List(op.project).Filter(filter).Do()
if err != nil {
return nil, err
}
ret := make([]string, 0, len(res.Items))
for _, snap := range res.Items {
ret = append(ret, snap.Name)
}
return ret, nil
}
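// exampleSnapshotFilter is an illustrative sketch, not part of the original commit, of the filter
// syntax built above: with more than one tag filter, each "key eq value" clause is parenthesized
// and the clauses are joined with spaces (the tag names here are hypothetical).
func exampleSnapshotFilter() string {
return strings.Join([]string{"(tag-key eq ark-snapshot)", "(env eq prod)"}, " ")
}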
func (op *blockStorageAdapter) CreateSnapshot(volumeID string, tags map[string]string) (string, error) {
// snapshot names must adhere to RFC 1035 and be 1-63 characters long
var snapshotName string
suffix := "-" + uuid.NewV4().String()
if len(volumeID) <= (63 - len(suffix)) {
snapshotName = volumeID + suffix
} else {
snapshotName = volumeID[0:63-len(suffix)] + suffix
}
gceSnap := compute.Snapshot{
Name: snapshotName,
}
_, err := op.gce.Disks.CreateSnapshot(op.project, op.zone, volumeID, &gceSnap).Do()
if err != nil {
return "", err
}
// The snapshot is not immediately available after creation for labels to be
// applied to it, so poll for a period of time.
if pollErr := wait.Poll(1*time.Second, 30*time.Second, func() (bool, error) {
if res, err := op.gce.Snapshots.Get(op.project, gceSnap.Name).Do(); err == nil {
gceSnap = *res
return true, nil
}
return false, nil
}); pollErr != nil {
return "", pollErr
}
labels := &compute.GlobalSetLabelsRequest{
Labels: tags,
LabelFingerprint: gceSnap.LabelFingerprint,
}
_, err = op.gce.Snapshots.SetLabels(op.project, gceSnap.Name, labels).Do()
if err != nil {
return "", err
}
return gceSnap.Name, nil
}
func (op *blockStorageAdapter) DeleteSnapshot(snapshotID string) error {
_, err := op.gce.Snapshots.Delete(op.project, snapshotID).Do()
return err
}

View File

@@ -0,0 +1,73 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"io"
"strings"
storage "google.golang.org/api/storage/v1"
"github.com/heptio/ark/pkg/cloudprovider"
)
type objectStorageAdapter struct {
project string
zone string
gcs *storage.Service
}
var _ cloudprovider.ObjectStorageAdapter = &objectStorageAdapter{}
func (op *objectStorageAdapter) PutObject(bucket string, key string, body io.ReadSeeker) error {
obj := &storage.Object{
Name: key,
}
_, err := op.gcs.Objects.Insert(bucket, obj).Media(body).Do()
return err
}
func (op *objectStorageAdapter) GetObject(bucket string, key string) (io.ReadCloser, error) {
res, err := op.gcs.Objects.Get(bucket, key).Download()
if err != nil {
return nil, err
}
return res.Body, nil
}
func (op *objectStorageAdapter) ListCommonPrefixes(bucket string, delimiter string) ([]string, error) {
res, err := op.gcs.Objects.List(bucket).Delimiter(delimiter).Do()
if err != nil {
return nil, err
}
// GCP returns prefixes inclusive of the last delimiter. We need to strip
// it.
ret := make([]string, 0, len(res.Prefixes))
for _, prefix := range res.Prefixes {
ret = append(ret, prefix[0:strings.LastIndex(prefix, delimiter)])
}
return ret, nil
}
func (op *objectStorageAdapter) DeleteObject(bucket string, key string) error {
return op.gcs.Objects.Delete(bucket, key).Do()
}

View File

@@ -0,0 +1,72 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gcp
import (
"golang.org/x/oauth2"
"golang.org/x/oauth2/google"
"google.golang.org/api/compute/v0.beta"
"google.golang.org/api/storage/v1"
"github.com/heptio/ark/pkg/cloudprovider"
)
type storageAdapter struct {
blockStorage *blockStorageAdapter
objectStorage *objectStorageAdapter
}
var _ cloudprovider.StorageAdapter = &storageAdapter{}
func NewStorageAdapter(project string, zone string) (cloudprovider.StorageAdapter, error) {
client, err := google.DefaultClient(oauth2.NoContext, compute.ComputeScope, storage.DevstorageReadWriteScope)
if err != nil {
return nil, err
}
gce, err := compute.New(client)
if err != nil {
return nil, err
}
gcs, err := storage.New(client)
if err != nil {
return nil, err
}
return &storageAdapter{
objectStorage: &objectStorageAdapter{
gcs: gcs,
project: project,
zone: zone,
},
blockStorage: &blockStorageAdapter{
gce: gce,
project: project,
zone: zone,
},
}, nil
}
func (op *storageAdapter) ObjectStorage() cloudprovider.ObjectStorageAdapter {
return op.objectStorage
}
func (op *storageAdapter) BlockStorage() cloudprovider.BlockStorageAdapter {
return op.blockStorage
}

View File

@@ -0,0 +1,121 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import (
"fmt"
"time"
)
// SnapshotService exposes Ark-specific operations for snapshotting and restoring block
// volumes.
type SnapshotService interface {
// GetAllSnapshots returns a slice of all snapshots found in the cloud API that
// are tagged with Ark metadata. Returns an error if a problem is encountered accessing
// the cloud API.
GetAllSnapshots() ([]string, error)
// CreateSnapshot triggers a snapshot for the specified cloud volume and tags it with metadata.
// It returns the cloud snapshot ID, or an error if a problem is encountered triggering the snapshot
// via the cloud API.
CreateSnapshot(volumeID string) (string, error)
// CreateVolumeFromSnapshot triggers a restore operation to create a new cloud volume from the specified
// snapshot and volume characteristics. Returns the cloud volume ID, or an error if a problem is
// encountered triggering the restore via the cloud API.
CreateVolumeFromSnapshot(snapshotID, volumeType string, iops *int) (string, error)
// DeleteSnapshot triggers a deletion of the specified Ark snapshot via the cloud API. It returns an
// error if a problem is encountered triggering the deletion.
DeleteSnapshot(snapshotID string) error
// GetVolumeInfo gets the type and IOPS (if applicable) from the cloud API.
GetVolumeInfo(volumeID string) (string, *int, error)
}
const (
volumeCreateWaitTimeout = 30 * time.Second
volumeCreatePollInterval = 1 * time.Second
snapshotTagKey = "tag-key"
snapshotTagVal = "ark-snapshot"
)
type snapshotService struct {
blockStorage BlockStorageAdapter
}
var _ SnapshotService = &snapshotService{}
// NewSnapshotService creates a snapshot service using the provided block storage adapter
func NewSnapshotService(blockStorage BlockStorageAdapter) SnapshotService {
return &snapshotService{
blockStorage: blockStorage,
}
}
func (sr *snapshotService) CreateVolumeFromSnapshot(snapshotID string, volumeType string, iops *int) (string, error) {
volumeID, err := sr.blockStorage.CreateVolumeFromSnapshot(snapshotID, volumeType, iops)
if err != nil {
return "", err
}
// wait for volume to be ready (up to a maximum time limit)
ticker := time.NewTicker(volumeCreatePollInterval)
defer ticker.Stop()
timeout := time.NewTimer(volumeCreateWaitTimeout)
for {
select {
case <-timeout.C:
return "", fmt.Errorf("timeout reached waiting for volume %v to be ready", volumeID)
case <-ticker.C:
if ready, err := sr.blockStorage.IsVolumeReady(volumeID); err == nil && ready {
return volumeID, nil
}
}
}
}
func (sr *snapshotService) GetAllSnapshots() ([]string, error) {
tags := map[string]string{
snapshotTagKey: snapshotTagVal,
}
res, err := sr.blockStorage.ListSnapshots(tags)
if err != nil {
return nil, err
}
return res, nil
}
func (sr *snapshotService) CreateSnapshot(volumeID string) (string, error) {
tags := map[string]string{
snapshotTagKey: snapshotTagVal,
}
return sr.blockStorage.CreateSnapshot(volumeID, tags)
}
func (sr *snapshotService) DeleteSnapshot(snapshotID string) error {
return sr.blockStorage.DeleteSnapshot(snapshotID)
}
func (sr *snapshotService) GetVolumeInfo(volumeID string) (string, *int, error) {
return sr.blockStorage.GetVolumeInfo(volumeID)
}
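// exampleSnapshotRoundTrip is an illustrative sketch, not part of the original commit, using a
// hypothetical volume ID: snapshot a volume, then restore a new volume of the same type from the
// snapshot. CreateVolumeFromSnapshot blocks until the new volume is ready or the 30-second
// timeout elapses.
func exampleSnapshotRoundTrip(svc SnapshotService) (string, error) {
snapshotID, err := svc.CreateSnapshot("vol-123")
if err != nil {
return "", err
}
volumeType, iops, err := svc.GetVolumeInfo("vol-123")
if err != nil {
return "", err
}
return svc.CreateVolumeFromSnapshot(snapshotID, volumeType, iops)
}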

View File

@@ -0,0 +1,72 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cloudprovider
import "io"
// ObjectStorageAdapter exposes basic object-storage operations required
// by Ark.
type ObjectStorageAdapter interface {
// PutObject creates a new object using the data in body within the specified
// object storage bucket with the given key.
PutObject(bucket string, key string, body io.ReadSeeker) error
// GetObject retrieves the object with the given key from the specified
// bucket in object storage.
GetObject(bucket string, key string) (io.ReadCloser, error)
// ListCommonPrefixes gets a list of all object key prefixes that come
// before the provided delimiter (this is often used to simulate a directory
// hierarchy in object storage).
ListCommonPrefixes(bucket string, delimiter string) ([]string, error)
// DeleteObject removes object with the specified key from the given
// bucket.
DeleteObject(bucket string, key string) error
}
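// For illustration (not part of the original commit): given object keys
// "backup-1/ark-backup.json" and "backup-2/ark-backup.json" and a delimiter of "/",
// ListCommonPrefixes acts like a directory listing and yields the prefixes for backup-1 and
// backup-2. Providers differ on whether the trailing delimiter is included in each prefix, which
// is why the Azure and GCP adapters strip it.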
// BlockStorageAdapter exposes basic block-storage operations required
// by Ark.
type BlockStorageAdapter interface {
// CreateVolumeFromSnapshot creates a new block volume, initialized from the provided snapshot,
// and with the specified type and IOPS (if using provisioned IOPS).
CreateVolumeFromSnapshot(snapshotID, volumeType string, iops *int) (volumeID string, err error)
// GetVolumeInfo returns the type and IOPS (if using provisioned IOPS) for a specified block
// volume.
GetVolumeInfo(volumeID string) (string, *int, error)
// IsVolumeReady returns whether the specified volume is ready to be used.
IsVolumeReady(volumeID string) (ready bool, err error)
// ListSnapshots returns a list of all snapshots matching the specified set of tag key/values.
ListSnapshots(tagFilters map[string]string) ([]string, error)
// CreateSnapshot creates a snapshot of the specified block volume, and applies the provided
// set of tags to the snapshot.
CreateSnapshot(volumeID string, tags map[string]string) (snapshotID string, err error)
// DeleteSnapshot deletes the specified volume snapshot.
DeleteSnapshot(snapshotID string) error
}
// StorageAdapter exposes object- and block-storage interfaces and associated methods
// for a given storage provider.
type StorageAdapter interface {
ObjectStorage() ObjectStorageAdapter
BlockStorage() BlockStorageAdapter
}

57
pkg/cmd/ark/ark.go Normal file
View File

@@ -0,0 +1,57 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package ark
import (
"flag"
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd/cli/backup"
"github.com/heptio/ark/pkg/cmd/cli/restore"
"github.com/heptio/ark/pkg/cmd/cli/schedule"
"github.com/heptio/ark/pkg/cmd/server"
"github.com/heptio/ark/pkg/cmd/version"
)
func NewCommand(name string) *cobra.Command {
c := &cobra.Command{
Use: name,
Short: "Back up and restore Kubernetes cluster resources.",
Long: `Heptio Ark is a tool for managing disaster recovery, specifically for
Kubernetes cluster resources. It provides a simple, configurable,
and operationally robust way to back up your application state and
associated data.`,
}
f := client.NewFactory()
f.BindFlags(c.PersistentFlags())
c.AddCommand(
backup.NewCommand(f),
schedule.NewCommand(f),
restore.NewCommand(f),
server.NewCommand(),
version.NewCommand(),
)
// add the glog flags
c.PersistentFlags().AddGoFlagSet(flag.CommandLine)
return c
}
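// Illustrative entrypoint sketch, not part of the original commit: a hypothetical main package
// would wire up the root command like this.
//
//	func main() {
//		if err := ark.NewCommand("ark").Execute(); err != nil {
//			os.Exit(1)
//		}
//	}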

View File

@@ -0,0 +1,45 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "backup",
Short: "Work with backups",
Long: "Work with backups",
}
c.AddCommand(
NewCreateCommand(f),
NewGetCommand(f),
// Will implement describe later
// NewDescribeCommand(f),
// If you delete a backup and it still exists in object storage, the backup sync controller will
// recreate it. Until we have a good UX around this, we're disabling the delete command.
// NewDeleteCommand(f),
)
return c
}

View File

@@ -0,0 +1,138 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"errors"
"fmt"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/flag"
"github.com/heptio/ark/pkg/cmd/util/output"
)
func NewCreateCommand(f client.Factory) *cobra.Command {
o := NewCreateOptions()
c := &cobra.Command{
Use: "create NAME",
Short: "Create a backup",
Run: func(c *cobra.Command, args []string) {
cmd.CheckError(o.Validate(c, args))
cmd.CheckError(o.Complete(args))
cmd.CheckError(o.Run(c, f))
},
}
o.BindFlags(c.Flags())
output.BindFlags(c.Flags())
output.ClearOutputFlagDefault(c)
return c
}
type CreateOptions struct {
Name string
TTL time.Duration
SnapshotVolumes bool
IncludeNamespaces flag.StringArray
ExcludeNamespaces flag.StringArray
IncludeResources flag.StringArray
ExcludeResources flag.StringArray
Labels flag.Map
Selector flag.LabelSelector
}
func NewCreateOptions() *CreateOptions {
return &CreateOptions{
TTL: 24 * time.Hour,
IncludeNamespaces: flag.NewStringArray("*"),
Labels: flag.NewMap(),
}
}
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.DurationVar(&o.TTL, "ttl", o.TTL, "how long before the backup can be garbage collected")
flags.BoolVar(&o.SnapshotVolumes, "snapshot-volumes", o.SnapshotVolumes, "take snapshots of PersistentVolumes as part of the backup")
flags.Var(&o.IncludeNamespaces, "include-namespaces", "namespaces to include in the backup (use '*' for all namespaces)")
flags.Var(&o.ExcludeNamespaces, "exclude-namespaces", "namespaces to exclude from the backup")
flags.Var(&o.IncludeResources, "include-resources", "resources to include in the backup, formatted as resource.group, such as storageclasses.storage.k8s.io (use '*' for all resources)")
flags.Var(&o.ExcludeResources, "exclude-resources", "resources to exclude from the backup, formatted as resource.group, such as storageclasses.storage.k8s.io")
flags.Var(&o.Labels, "labels", "labels to apply to the backup")
flags.VarP(&o.Selector, "selector", "l", "only back up resources matching this label selector")
}
func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("you must specify only one argument, the backup's name")
}
if err := output.ValidateFlags(c); err != nil {
return err
}
return nil
}
func (o *CreateOptions) Complete(args []string) error {
o.Name = args[0]
return nil
}
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
arkClient, err := f.Client()
if err != nil {
return err
}
backup := &api.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Name: o.Name,
Labels: o.Labels.Data(),
},
Spec: api.BackupSpec{
IncludedNamespaces: o.IncludeNamespaces,
ExcludedNamespaces: o.ExcludeNamespaces,
IncludedResources: o.IncludeResources,
ExcludedResources: o.ExcludeResources,
LabelSelector: o.Selector.LabelSelector,
SnapshotVolumes: o.SnapshotVolumes,
TTL: metav1.Duration{Duration: o.TTL},
},
}
if printed, err := output.PrintWithFormat(c, backup); printed || err != nil {
return err
}
_, err = arkClient.ArkV1().Backups(backup.Namespace).Create(backup)
if err != nil {
return err
}
fmt.Printf("Backup %q created successfully.\n", backup.Name)
return nil
}
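// Illustrative invocation sketch, not part of the original commit, using hypothetical names; all
// flags shown are bound in BindFlags above:
//
//	ark backup create nginx-backup \
//	    --include-namespaces nginx-example \
//	    --selector app=nginx \
//	    --snapshot-volumes \
//	    --ttl 48h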

View File

@@ -0,0 +1,53 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"fmt"
"os"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
func NewDeleteCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Short: "Delete a backup",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.Usage()
os.Exit(1)
}
arkClient, err := f.Client()
cmd.CheckError(err)
backupName := args[0]
err = arkClient.ArkV1().Backups(api.DefaultNamespace).Delete(backupName, nil)
cmd.CheckError(err)
fmt.Printf("Backup %q deleted\n", backupName)
},
}
return c
}

View File

@@ -0,0 +1,34 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewDescribeCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "describe",
Short: "Describe a backup",
Run: func(c *cobra.Command, args []string) {
},
}
return c
}

66
pkg/cmd/cli/backup/get.go Normal file
View File

@@ -0,0 +1,66 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package backup
import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
)
func NewGetCommand(f client.Factory) *cobra.Command {
var listOptions metav1.ListOptions
c := &cobra.Command{
Use: "get",
Short: "Get backups",
Run: func(c *cobra.Command, args []string) {
err := output.ValidateFlags(c)
cmd.CheckError(err)
arkClient, err := f.Client()
cmd.CheckError(err)
var backups *api.BackupList
if len(args) > 0 {
backups = new(api.BackupList)
for _, name := range args {
backup, err := arkClient.Ark().Backups(api.DefaultNamespace).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
backups.Items = append(backups.Items, *backup)
}
} else {
backups, err = arkClient.ArkV1().Backups(api.DefaultNamespace).List(metav1.ListOptions{})
cmd.CheckError(err)
}
_, err = output.PrintWithFormat(c, backups)
cmd.CheckError(err)
},
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
output.BindFlags(c.Flags())
return c
}

View File

@@ -0,0 +1,38 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "config",
Short: "Work with config",
Long: "Work with config",
}
c.AddCommand(
NewGetCommand(f),
NewSetCommand(f),
)
return c
}

29
pkg/cmd/cli/config/get.go Normal file
View File

@@ -0,0 +1,29 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
func NewGetCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{}
return c
}

29
pkg/cmd/cli/config/set.go Normal file
View File

@@ -0,0 +1,29 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package config
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
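// NewSetCommand creates the "set" subcommand for config. It is currently a placeholder.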
func NewSetCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{}
return c
}

View File

@@ -0,0 +1,129 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"errors"
"fmt"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/flag"
"github.com/heptio/ark/pkg/cmd/util/output"
)
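// NewCreateCommand creates the "create" subcommand for restores.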
func NewCreateCommand(f client.Factory) *cobra.Command {
o := NewCreateOptions()
c := &cobra.Command{
Use: "create BACKUP",
Short: "Create a restore",
Run: func(c *cobra.Command, args []string) {
cmd.CheckError(o.Validate(c, args))
cmd.CheckError(o.Complete(args))
cmd.CheckError(o.Run(c, f))
},
}
o.BindFlags(c.Flags())
output.BindFlags(c.Flags())
output.ClearOutputFlagDefault(c)
return c
}
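// CreateOptions collects the options for creating a restore.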
type CreateOptions struct {
BackupName string
RestoreVolumes bool
Labels flag.Map
Namespaces flag.StringArray
NamespaceMappings flag.Map
Selector flag.LabelSelector
}
func NewCreateOptions() *CreateOptions {
return &CreateOptions{
Labels: flag.NewMap(),
NamespaceMappings: flag.NewMap().WithEntryDelimiter(",").WithKeyValueDelimiter(":"),
}
}
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
flags.BoolVar(&o.RestoreVolumes, "restore-volumes", o.RestoreVolumes, "whether to restore volumes from snapshots")
flags.Var(&o.Labels, "labels", "labels to apply to the restore")
flags.Var(&o.Namespaces, "namespaces", "comma-separated list of namespaces to restore")
flags.Var(&o.NamespaceMappings, "namespace-mappings", "map of source namespace names in the backup to target namespace names for the restore, in the form src1:dst1,src2:dst2,...")
flags.VarP(&o.Selector, "selector", "l", "only restore resources matching this label selector")
}
func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("you must specify only one argument, the backup's name")
}
if err := output.ValidateFlags(c); err != nil {
return err
}
return nil
}
func (o *CreateOptions) Complete(args []string) error {
o.BackupName = args[0]
return nil
}
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
arkClient, err := f.Client()
if err != nil {
return err
}
restore := &api.Restore{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Name: fmt.Sprintf("%s-%s", o.BackupName, time.Now().Format("20060102150405")),
Labels: o.Labels.Data(),
},
Spec: api.RestoreSpec{
BackupName: o.BackupName,
Namespaces: o.Namespaces,
NamespaceMapping: o.NamespaceMappings.Data(),
LabelSelector: o.Selector.LabelSelector,
RestorePVs: o.RestoreVolumes,
},
}
if printed, err := output.PrintWithFormat(c, restore); printed || err != nil {
return err
}
restore, err = arkClient.ArkV1().Restores(restore.Namespace).Create(restore)
if err != nil {
return err
}
fmt.Printf("Restore %q created successfully.\n", restore.Name)
return nil
}

View File

@@ -0,0 +1,53 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"fmt"
"os"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
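// NewDeleteCommand creates the "delete" subcommand for restores.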
func NewDeleteCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Short: "Delete a restore",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.Usage()
os.Exit(1)
}
arkClient, err := f.Client()
cmd.CheckError(err)
name := args[0]
err = arkClient.ArkV1().Restores(api.DefaultNamespace).Delete(name, nil)
cmd.CheckError(err)
fmt.Printf("Restore %q deleted\n", name)
},
}
return c
}

View File

@@ -0,0 +1,34 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
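// NewDescribeCommand creates the "describe" subcommand for restores. It is not yet implemented.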
func NewDescribeCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "describe",
Short: "Describe a backup",
Run: func(c *cobra.Command, args []string) {
},
}
return c
}

View File

@@ -0,0 +1,71 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
)
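// NewGetCommand creates the "get" subcommand for restores, listing all restores
// or only the restores named as arguments.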
func NewGetCommand(f client.Factory) *cobra.Command {
var listOptions metav1.ListOptions
c := &cobra.Command{
Use: "get",
Short: "get restores",
Run: func(c *cobra.Command, args []string) {
err := output.ValidateFlags(c)
cmd.CheckError(err)
arkClient, err := f.Client()
cmd.CheckError(err)
var restores *api.RestoreList
if len(args) > 0 {
restores = new(api.RestoreList)
for _, name := range args {
restore, err := arkClient.ArkV1().Restores(api.DefaultNamespace).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
restores.Items = append(restores.Items, *restore)
}
} else {
restores, err = arkClient.ArkV1().Restores(api.DefaultNamespace).List(metav1.ListOptions{})
cmd.CheckError(err)
}
_, err = output.PrintWithFormat(c, restores)
cmd.CheckError(err)
},
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
output.BindFlags(c.Flags())
return c
}

View File

@@ -0,0 +1,41 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package restore
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
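// NewCommand creates the parent "restore" command.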
func NewCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "restore",
Short: "Work with restores",
Long: "Work with restores",
}
c.AddCommand(
NewCreateCommand(f),
NewGetCommand(f),
// Will implement later
// NewDescribeCommand(f),
NewDeleteCommand(f),
)
return c
}

View File

@@ -0,0 +1,124 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedule
import (
"errors"
"fmt"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/cli/backup"
"github.com/heptio/ark/pkg/cmd/util/output"
)
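// NewCreateCommand creates the "create" subcommand for schedules.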
func NewCreateCommand(f client.Factory) *cobra.Command {
o := NewCreateOptions()
c := &cobra.Command{
Use: "create NAME",
Short: "Create a schedule",
Run: func(c *cobra.Command, args []string) {
cmd.CheckError(o.Validate(c, args))
cmd.CheckError(o.Complete(args))
cmd.CheckError(o.Run(c, f))
},
}
o.BindFlags(c.Flags())
output.BindFlags(c.Flags())
output.ClearOutputFlagDefault(c)
return c
}
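// CreateOptions collects the options for creating a schedule, wrapping the
// backup create options that populate the schedule's backup template.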
type CreateOptions struct {
BackupOptions *backup.CreateOptions
Schedule string
}
func NewCreateOptions() *CreateOptions {
return &CreateOptions{
BackupOptions: backup.NewCreateOptions(),
}
}
func (o *CreateOptions) BindFlags(flags *pflag.FlagSet) {
o.BackupOptions.BindFlags(flags)
flags.StringVar(&o.Schedule, "schedule", o.Schedule, "a cron expression specifying a recurring schedule for this backup to run")
}
func (o *CreateOptions) Validate(c *cobra.Command, args []string) error {
if len(args) != 1 {
return errors.New("you must specify only one argument, the schedule's name")
}
if len(o.Schedule) == 0 {
return errors.New("--schedule is required")
}
return o.BackupOptions.Validate(c, args)
}
func (o *CreateOptions) Complete(args []string) error {
return o.BackupOptions.Complete(args)
}
func (o *CreateOptions) Run(c *cobra.Command, f client.Factory) error {
arkClient, err := f.Client()
if err != nil {
return err
}
schedule := &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: api.DefaultNamespace,
Name: o.BackupOptions.Name,
},
Spec: api.ScheduleSpec{
Template: api.BackupSpec{
IncludedNamespaces: o.BackupOptions.IncludeNamespaces,
ExcludedNamespaces: o.BackupOptions.ExcludeNamespaces,
IncludedResources: o.BackupOptions.IncludeResources,
ExcludedResources: o.BackupOptions.ExcludeResources,
LabelSelector: o.BackupOptions.Selector.LabelSelector,
SnapshotVolumes: o.BackupOptions.SnapshotVolumes,
TTL: metav1.Duration{Duration: o.BackupOptions.TTL},
},
Schedule: o.Schedule,
},
}
if printed, err := output.PrintWithFormat(c, schedule); printed || err != nil {
return err
}
_, err = arkClient.ArkV1().Schedules(schedule.Namespace).Create(schedule)
if err != nil {
return err
}
fmt.Printf("Schedule %q created successfully.\n", schedule.Name)
return nil
}

View File

@@ -0,0 +1,53 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedule
import (
"fmt"
"os"
"github.com/spf13/cobra"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
)
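// NewDeleteCommand creates the "delete" subcommand for schedules.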
func NewDeleteCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "delete NAME",
Short: "Delete a schedule",
Run: func(c *cobra.Command, args []string) {
if len(args) != 1 {
c.Usage()
os.Exit(1)
}
arkClient, err := f.Client()
cmd.CheckError(err)
name := args[0]
err = arkClient.ArkV1().Schedules(api.DefaultNamespace).Delete(name, nil)
cmd.CheckError(err)
fmt.Printf("Schedule %q deleted\n", name)
},
}
return c
}

View File

@@ -0,0 +1,34 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedule
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
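// NewDescribeCommand creates the "describe" subcommand for schedules. It is not yet implemented.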
func NewDescribeCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "describe",
Short: "Describe a backup",
Run: func(c *cobra.Command, args []string) {
},
}
return c
}

View File

@@ -0,0 +1,71 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedule
import (
"github.com/spf13/cobra"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/cmd/util/output"
)
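// NewGetCommand creates the "get" subcommand for schedules, listing all schedules
// or only the schedules named as arguments.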
func NewGetCommand(f client.Factory) *cobra.Command {
var listOptions metav1.ListOptions
c := &cobra.Command{
Use: "get",
Short: "Get schedules",
Run: func(c *cobra.Command, args []string) {
err := output.ValidateFlags(c)
cmd.CheckError(err)
arkClient, err := f.Client()
cmd.CheckError(err)
var schedules *api.ScheduleList
if len(args) > 0 {
schedules = new(api.ScheduleList)
for _, name := range args {
schedule, err := arkClient.ArkV1().Schedules(api.DefaultNamespace).Get(name, metav1.GetOptions{})
cmd.CheckError(err)
schedules.Items = append(schedules.Items, *schedule)
}
} else {
schedules, err = arkClient.ArkV1().Schedules(api.DefaultNamespace).List(metav1.ListOptions{})
cmd.CheckError(err)
}
_, err = output.PrintWithFormat(c, schedules)
cmd.CheckError(err)
},
}
c.Flags().StringVarP(&listOptions.LabelSelector, "selector", "l", listOptions.LabelSelector, "only show items matching this label selector")
output.BindFlags(c.Flags())
return c
}

View File

@@ -0,0 +1,41 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schedule
import (
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/client"
)
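// NewCommand creates the parent "schedule" command.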
func NewCommand(f client.Factory) *cobra.Command {
c := &cobra.Command{
Use: "schedule",
Short: "Work with schedules",
Long: "Work with schedules",
}
c.AddCommand(
NewCreateCommand(f),
NewGetCommand(f),
// Will implement later
// NewDescribeCommand(f),
NewDeleteCommand(f),
)
return c
}

34
pkg/cmd/errors.go Normal file
View File

@@ -0,0 +1,34 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cmd
import (
"context"
"fmt"
"os"
)
// CheckError prints err to stderr and exits with code 1 if err is not nil. Otherwise, it is a
// no-op.
func CheckError(err error) {
if err != nil {
if err != context.Canceled {
fmt.Fprintf(os.Stderr, fmt.Sprintf("An error occurred: %v\n", err))
}
os.Exit(1)
}
}

538
pkg/cmd/server/server.go Normal file
View File

@@ -0,0 +1,538 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"context"
"errors"
"fmt"
"reflect"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/endpoints"
"github.com/golang/glog"
"github.com/spf13/cobra"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/pkg/api/v1"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cloudprovider"
arkaws "github.com/heptio/ark/pkg/cloudprovider/aws"
"github.com/heptio/ark/pkg/cloudprovider/azure"
"github.com/heptio/ark/pkg/cloudprovider/gcp"
"github.com/heptio/ark/pkg/cmd"
"github.com/heptio/ark/pkg/controller"
arkdiscovery "github.com/heptio/ark/pkg/discovery"
"github.com/heptio/ark/pkg/generated/clientset"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
"github.com/heptio/ark/pkg/restore"
"github.com/heptio/ark/pkg/restore/restorers"
"github.com/heptio/ark/pkg/util/kube"
)
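// NewCommand creates the cobra command for running the Ark server.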
func NewCommand() *cobra.Command {
var kubeconfig string
var command = &cobra.Command{
Use: "server",
Short: "Run the ark server",
Long: "Run the ark server",
Run: func(c *cobra.Command, args []string) {
s, err := newServer(kubeconfig)
cmd.CheckError(err)
cmd.CheckError(s.run())
},
}
command.Flags().StringVar(&kubeconfig, "kubeconfig", "", "Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, the KUBECONFIG environment variable is used, falling back to in-cluster configuration")
return command
}
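// server wires together the clients, cloud-provider services, shared informer
// factory, and cancellation context used by the Ark server's controllers.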
type server struct {
kubeClient kubernetes.Interface
apiExtensionsClient apiextensionsclient.Interface
arkClient clientset.Interface
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
discoveryClient discovery.DiscoveryInterface
clientPool dynamic.ClientPool
sharedInformerFactory informers.SharedInformerFactory
ctx context.Context
cancelFunc context.CancelFunc
}
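// newServer constructs a server from the given kubeconfig path.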
func newServer(kubeconfig string) (*server, error) {
clientConfig, err := client.Config(kubeconfig)
if err != nil {
return nil, err
}
kubeClient, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
apiExtensionsClient, err := apiextensionsclient.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
arkClient, err := clientset.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
ctx, cancelFunc := context.WithCancel(context.Background())
s := &server{
kubeClient: kubeClient,
apiExtensionsClient: apiExtensionsClient,
arkClient: arkClient,
discoveryClient: apiExtensionsClient.Discovery(),
clientPool: dynamic.NewDynamicClientPool(clientConfig),
sharedInformerFactory: informers.NewSharedInformerFactory(arkClient, 0),
ctx: ctx,
cancelFunc: cancelFunc,
}
return s, nil
}
func (s *server) run() error {
if err := s.ensureArkNamespace(); err != nil {
return err
}
config, err := s.loadConfig()
if err != nil {
return err
}
applyConfigDefaults(config)
s.watchConfig(config)
if err := s.initBackupService(config); err != nil {
return err
}
if err := s.initSnapshotService(config); err != nil {
return err
}
if err := s.runControllers(config); err != nil {
return err
}
return nil
}
func (s *server) ensureArkNamespace() error {
glog.Infof("Ensuring %s namespace exists for backups", api.DefaultNamespace)
defaultNamespace := v1.Namespace{
ObjectMeta: metav1.ObjectMeta{
Name: api.DefaultNamespace,
},
}
created, err := kube.EnsureNamespaceExists(&defaultNamespace, s.kubeClient.CoreV1().Namespaces())
if err != nil {
return err
}
if created {
glog.Infof("Namespace created")
} else {
glog.Infof("Namespace already exists")
}
return nil
}
func (s *server) loadConfig() (*api.Config, error) {
glog.Infof("Retrieving Ark configuration")
var (
config *api.Config
err error
)
for {
config, err = s.arkClient.ArkV1().Configs(api.DefaultNamespace).Get("default", metav1.GetOptions{})
if err == nil {
break
}
if !apierrors.IsNotFound(err) {
glog.Errorf("error retrieving configuration: %v", err)
}
glog.Infof("Will attempt to retrieve configuration again in 5 seconds")
time.Sleep(5 * time.Second)
}
glog.Infof("Successfully retrieved Ark configuration")
return config, nil
}
const (
defaultGCSyncPeriod = 60 * time.Minute
defaultBackupSyncPeriod = 60 * time.Minute
defaultScheduleSyncPeriod = time.Minute
)
var defaultResourcePriorities = []string{
"namespaces",
"persistentvolumes",
"persistentvolumeclaims",
"secrets",
"configmaps",
}
func applyConfigDefaults(c *api.Config) {
if c.GCSyncPeriod.Duration == 0 {
c.GCSyncPeriod.Duration = defaultGCSyncPeriod
}
if c.BackupSyncPeriod.Duration == 0 {
c.BackupSyncPeriod.Duration = defaultBackupSyncPeriod
}
if c.ScheduleSyncPeriod.Duration == 0 {
c.ScheduleSyncPeriod.Duration = defaultScheduleSyncPeriod
}
if len(c.ResourcePriorities) == 0 {
c.ResourcePriorities = defaultResourcePriorities
glog.Infof("Using default resource priorities: %v", c.ResourcePriorities)
} else {
glog.Infof("Using resource priorities from config: %v", c.ResourcePriorities)
}
}
// watchConfig adds an update event handler to the Config shared informer, invoking s.cancelFunc
// when it sees a change.
func (s *server) watchConfig(config *api.Config) {
s.sharedInformerFactory.Ark().V1().Configs().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: func(oldObj, newObj interface{}) {
updated := newObj.(*api.Config)
if updated.Name != config.Name {
glog.V(5).Infof("config watch channel received other config %q", updated.Name)
return
}
if !reflect.DeepEqual(config, updated) {
glog.Infof("Detected a config change. Gracefully shutting down")
s.cancelFunc()
}
},
})
}
func (s *server) initBackupService(config *api.Config) error {
glog.Infof("Configuring cloud provider for backup service")
cloud, err := initCloud(config.BackupStorageProvider.CloudProviderConfig, "backupStorageProvider")
if err != nil {
return err
}
s.backupService = cloudprovider.NewBackupService(cloud.ObjectStorage())
return nil
}
func (s *server) initSnapshotService(config *api.Config) error {
glog.Infof("Configuring cloud provider for snapshot service")
cloud, err := initCloud(config.PersistentVolumeProvider, "persistentVolumeProvider")
if err != nil {
return err
}
s.snapshotService = cloudprovider.NewSnapshotService(cloud.BlockStorage())
return nil
}
func initCloud(config api.CloudProviderConfig, field string) (cloudprovider.StorageAdapter, error) {
var (
cloud cloudprovider.StorageAdapter
err error
)
if config.AWS != nil {
cloud, err = getAWSCloudProvider(config)
}
if config.GCP != nil {
if cloud != nil {
return nil, fmt.Errorf("you may only specify one of aws, gcp, or azure for %s", field)
}
cloud, err = getGCPCloudProvider(config)
}
if config.Azure != nil {
if cloud != nil {
return nil, fmt.Errorf("you may only specify one of aws, gcp, or azure for %s", field)
}
cloud, err = getAzureCloudProvider(config)
}
if err != nil {
return nil, err
}
if cloud == nil {
return nil, fmt.Errorf("you must specify one of aws, gcp, or azure for %s", field)
}
return cloud, err
}
func getAWSCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
if cloudConfig.AWS == nil {
return nil, errors.New("missing aws configuration in config file")
}
if cloudConfig.AWS.Region == "" {
return nil, errors.New("missing region in aws configuration in config file")
}
if cloudConfig.AWS.AvailabilityZone == "" {
return nil, errors.New("missing availabilityZone in aws configuration in config file")
}
awsConfig := aws.NewConfig().
WithRegion(cloudConfig.AWS.Region).
WithS3ForcePathStyle(cloudConfig.AWS.S3ForcePathStyle)
if cloudConfig.AWS.S3Url != "" {
awsConfig = awsConfig.WithEndpointResolver(
endpoints.ResolverFunc(func(service, region string, optFns ...func(*endpoints.Options)) (endpoints.ResolvedEndpoint, error) {
if service == endpoints.S3ServiceID {
return endpoints.ResolvedEndpoint{
URL: cloudConfig.AWS.S3Url,
}, nil
}
return endpoints.DefaultResolver().EndpointFor(service, region, optFns...)
}),
)
}
return arkaws.NewStorageAdapter(awsConfig, cloudConfig.AWS.AvailabilityZone)
}
func getGCPCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
if cloudConfig.GCP == nil {
return nil, errors.New("missing gcp configuration in config file")
}
if cloudConfig.GCP.Project == "" {
return nil, errors.New("missing project in gcp configuration in config file")
}
if cloudConfig.GCP.Zone == "" {
return nil, errors.New("missing zone in gcp configuration in config file")
}
return gcp.NewStorageAdapter(cloudConfig.GCP.Project, cloudConfig.GCP.Zone)
}
func getAzureCloudProvider(cloudConfig api.CloudProviderConfig) (cloudprovider.StorageAdapter, error) {
if cloudConfig.Azure == nil {
return nil, errors.New("missing azure configuration in config file")
}
if cloudConfig.Azure.Location == "" {
return nil, errors.New("missing location in azure configuration in config file")
}
return azure.NewStorageAdapter(cloudConfig.Azure.Location, cloudConfig.Azure.APITimeout.Duration)
}
func durationMin(a, b time.Duration) time.Duration {
if a < b {
return a
}
return b
}
func (s *server) runControllers(config *api.Config) error {
glog.Infof("Starting controllers")
ctx := s.ctx
var wg sync.WaitGroup
cloudBackupCacheResyncPeriod := durationMin(config.GCSyncPeriod.Duration, config.BackupSyncPeriod.Duration)
glog.Infof("Caching cloud backups every %s", cloudBackupCacheResyncPeriod)
s.backupService = cloudprovider.NewBackupServiceWithCachedBackupGetter(
ctx,
s.backupService,
cloudBackupCacheResyncPeriod,
)
backupSyncController := controller.NewBackupSyncController(
s.arkClient.ArkV1(),
s.backupService,
config.BackupStorageProvider.Bucket,
config.BackupSyncPeriod.Duration,
)
wg.Add(1)
go func() {
backupSyncController.Run(ctx, 1)
wg.Done()
}()
discoveryHelper, err := arkdiscovery.NewHelper(s.discoveryClient)
if err != nil {
return err
}
go wait.Until(
func() {
if err := discoveryHelper.Refresh(); err != nil {
glog.Errorf("error refreshing discovery: %v", err)
}
},
5*time.Minute,
ctx.Done(),
)
if config.RestoreOnlyMode {
glog.Infof("Restore only mode - not starting the backup, schedule or GC controllers")
} else {
backupper, err := newBackupper(discoveryHelper, s.clientPool, s.backupService, s.snapshotService)
cmd.CheckError(err)
backupController := controller.NewBackupController(
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
backupper,
s.backupService,
config.BackupStorageProvider.Bucket,
)
wg.Add(1)
go func() {
backupController.Run(ctx, 1)
wg.Done()
}()
scheduleController := controller.NewScheduleController(
s.arkClient.ArkV1(),
s.arkClient.ArkV1(),
s.sharedInformerFactory.Ark().V1().Schedules(),
config.ScheduleSyncPeriod.Duration,
)
wg.Add(1)
go func() {
scheduleController.Run(ctx, 1)
wg.Done()
}()
gcController := controller.NewGCController(
s.backupService,
s.snapshotService,
config.BackupStorageProvider.Bucket,
config.GCSyncPeriod.Duration,
s.sharedInformerFactory.Ark().V1().Backups(),
s.arkClient.ArkV1(),
)
wg.Add(1)
go func() {
gcController.Run(ctx, 1)
wg.Done()
}()
}
restorer, err := newRestorer(
discoveryHelper,
s.clientPool,
s.backupService,
s.snapshotService,
config.ResourcePriorities,
s.arkClient.ArkV1(),
s.kubeClient,
)
cmd.CheckError(err)
restoreController := controller.NewRestoreController(
s.sharedInformerFactory.Ark().V1().Restores(),
s.arkClient.ArkV1(),
s.arkClient.ArkV1(),
restorer,
s.backupService,
config.BackupStorageProvider.Bucket,
s.sharedInformerFactory.Ark().V1().Backups(),
)
wg.Add(1)
go func() {
restoreController.Run(ctx, 1)
wg.Done()
}()
// Shared informers have to be started after all controllers are created, so that
// every controller's event handlers are registered before events start flowing.
go s.sharedInformerFactory.Start(ctx.Done())
glog.Infof("Server started successfully")
<-ctx.Done()
glog.Info("Waiting for all controllers to shut down gracefully")
wg.Wait()
return nil
}
func newBackupper(
discoveryHelper arkdiscovery.Helper,
clientPool dynamic.ClientPool,
backupService cloudprovider.BackupService,
snapshotService cloudprovider.SnapshotService,
) (backup.Backupper, error) {
actions := map[string]backup.Action{}
if snapshotService != nil {
actions["persistentvolumes"] = backup.NewVolumeSnapshotAction(snapshotService)
}
return backup.NewKubernetesBackupper(
discoveryHelper,
client.NewDynamicFactory(clientPool),
actions,
)
}
func newRestorer(
discoveryHelper arkdiscovery.Helper,
clientPool dynamic.ClientPool,
backupService cloudprovider.BackupService,
snapshotService cloudprovider.SnapshotService,
resourcePriorities []string,
backupClient arkv1client.BackupsGetter,
kubeClient kubernetes.Interface,
) (restore.Restorer, error) {
restorers := map[string]restorers.ResourceRestorer{
"persistentvolumes": restorers.NewPersistentVolumeRestorer(snapshotService),
"persistentvolumeclaims": restorers.NewPersistentVolumeClaimRestorer(),
"services": restorers.NewServiceRestorer(),
"namespaces": restorers.NewNamespaceRestorer(),
"pods": restorers.NewPodRestorer(),
"jobs": restorers.NewJobRestorer(),
}
return restore.NewKubernetesRestorer(
discoveryHelper,
client.NewDynamicFactory(clientPool),
restorers,
backupService,
resourcePriorities,
backupClient,
kubeClient.CoreV1().Namespaces(),
)
}

View File

@@ -0,0 +1,50 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package server
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
func TestApplyConfigDefaults(t *testing.T) {
c := &v1.Config{}
// test defaulting
applyConfigDefaults(c)
assert.Equal(t, defaultGCSyncPeriod, c.GCSyncPeriod.Duration)
assert.Equal(t, defaultBackupSyncPeriod, c.BackupSyncPeriod.Duration)
assert.Equal(t, defaultScheduleSyncPeriod, c.ScheduleSyncPeriod.Duration)
assert.Equal(t, defaultResourcePriorities, c.ResourcePriorities)
// make sure defaulting doesn't overwrite real values
c.GCSyncPeriod.Duration = 5 * time.Minute
c.BackupSyncPeriod.Duration = 4 * time.Minute
c.ScheduleSyncPeriod.Duration = 3 * time.Minute
c.ResourcePriorities = []string{"a", "b"}
applyConfigDefaults(c)
assert.Equal(t, 5*time.Minute, c.GCSyncPeriod.Duration)
assert.Equal(t, 4*time.Minute, c.BackupSyncPeriod.Duration)
assert.Equal(t, 3*time.Minute, c.ScheduleSyncPeriod.Duration)
assert.Equal(t, []string{"a", "b"}, c.ResourcePriorities)
}

View File

@@ -0,0 +1,77 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"github.com/golang/glog"
"github.com/spf13/cobra"
)
// GetOptionalStringFlag returns the value of the specified flag from a
// cobra command, or the zero value ("") if the flag was not specified.
func GetOptionalStringFlag(cmd *cobra.Command, flagName string) string {
return GetStringFlag(cmd, flagName, false)
}
// GetStringFlag returns the value of the specified flag from a
// cobra command. If the flag is not specified and fatalIfMissing is true,
// this function logs a fatal error and calls os.Exit(255).
func GetStringFlag(cmd *cobra.Command, flagName string, fatalIfMissing bool) string {
s, err := cmd.Flags().GetString(flagName)
if err != nil && fatalIfMissing {
glog.Fatalf("error accessing flag %q for command %s: %v", flagName, cmd.Name(), err)
}
return s
}
// GetOptionalBoolFlag returns the value of the specified flag from a
// cobra command, or the zero value (false) if the flag was not specified.
func GetOptionalBoolFlag(cmd *cobra.Command, flagName string) bool {
return GetBoolFlag(cmd, flagName, false)
}
// GetBoolFlag returns the value of the specified flag from a
// cobra command. If the flag is not specified and fatalIfMissing is true,
// this function logs a fatal error and calls os.Exit(255).
func GetBoolFlag(cmd *cobra.Command, flagName string, fatalIfMissing bool) bool {
b, err := cmd.Flags().GetBool(flagName)
if err != nil && fatalIfMissing {
glog.Fatalf("error accessing flag %q for command %s: %v", flagName, cmd.Name(), err)
}
return b
}
// GetOptionalStringArrayFlag returns the value of the specified flag from a
// cobra command, or the zero value if the flag was not specified.
func GetOptionalStringArrayFlag(cmd *cobra.Command, flagName string) []string {
return GetStringArrayFlag(cmd, flagName, false)
}
// GetStringArrayFlag returns the value of the specified flag from a
// cobra command. If the flag is not specified and fatalIfMissing is true,
// this function logs a fatal error and calls os.Exit(255).
func GetStringArrayFlag(cmd *cobra.Command, flagName string, fatalIfMissing bool) []string {
f := cmd.Flag(flagName)
if f == nil {
if fatalIfMissing {
glog.Fatalf("error accessing flag %q for command %s: not specified", flagName, cmd.Name())
}
return []string{}
}
v := f.Value.(*StringArray)
return *v
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"strings"
)
// StringArray is a Cobra-compatible named type for defining a
// string slice flag.
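//
// An illustrative sketch (not part of the original source) of binding it to a
// pflag.FlagSet:
//
//	var namespaces StringArray
//	fs := pflag.NewFlagSet("example", pflag.ContinueOnError)
//	fs.Var(&namespaces, "namespaces", "comma-separated list of namespaces")
//	_ = fs.Parse([]string{"--namespaces", "a,b,c"})
//	// namespaces now holds {"a", "b", "c"}; namespaces.String() == "a,b,c"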
type StringArray []string
// NewStringArray returns a StringArray for a provided
// slice of values.
func NewStringArray(initial ...string) StringArray {
return StringArray(initial)
}
// String returns a comma-separated list of the items
// in the string array.
func (sa *StringArray) String() string {
return strings.Join(*sa, ",")
}
// Set comma-splits the provided string and assigns
// the results to the receiver. It returns an error if
// the string is not parseable.
func (sa *StringArray) Set(s string) error {
*sa = strings.Split(s, ",")
return nil
}
// Type returns a string representation of the
// StringArray type.
func (sa *StringArray) Type() string {
return "stringArray"
}

View File

@@ -0,0 +1,51 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// LabelSelector is a Cobra-compatible wrapper for defining
// a Kubernetes label-selector flag.
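//
// An illustrative sketch (not part of the original source), given a
// pflag.FlagSet fs:
//
//	var selector LabelSelector
//	fs.VarP(&selector, "selector", "l", "only include matching resources")
//	_ = selector.Set("app=nginx")
//	// selector.LabelSelector is the parsed *metav1.LabelSelector, and
//	// selector.String() == "app=nginx"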
type LabelSelector struct {
LabelSelector *metav1.LabelSelector
}
// String returns a string representation of the label
// selector flag.
func (ls *LabelSelector) String() string {
return metav1.FormatLabelSelector(ls.LabelSelector)
}
// Set parses the provided string and assigns the result
// to the label-selector receiver. It returns an error if
// the string is not parseable.
func (ls *LabelSelector) Set(s string) error {
parsed, err := metav1.ParseToLabelSelector(s)
if err != nil {
return err
}
ls.LabelSelector = parsed
return nil
}
// Type returns a string representation of the
// LabelSelector type.
func (ls *LabelSelector) Type() string {
return "labelSelector"
}

93
pkg/cmd/util/flag/map.go Normal file
View File

@@ -0,0 +1,93 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package flag
import (
"fmt"
"strings"
)
// Map is a Cobra-compatible wrapper for defining a flag containing
// map data (i.e. a collection of key-value pairs).
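//
// An illustrative sketch (not part of the original source): the default
// delimiters parse "k1=v1,k2=v2", while custom delimiters support forms such
// as the namespace mapping "src1:dst1,src2:dst2".
//
//	m := NewMap().WithEntryDelimiter(",").WithKeyValueDelimiter(":")
//	_ = m.Set("src1:dst1,src2:dst2")
//	// m.Data() == map[string]string{"src1": "dst1", "src2": "dst2"}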
type Map struct {
data map[string]string
entryDelimiter string
keyValueDelimiter string
}
// NewMap returns a Map using the default delimiters ("=" between keys and
// values, and "," between map entries, e.g. k1=v1,k2=v2)
func NewMap() Map {
m := Map{
data: make(map[string]string),
}
return m.WithEntryDelimiter(",").WithKeyValueDelimiter("=")
}
// WithEntryDelimiter sets the delimiter to be used between map
// entries.
//
// For example, in "k1=v1&k2=v2", the entry delimiter is "&"
func (m Map) WithEntryDelimiter(delimiter string) Map {
m.entryDelimiter = delimiter
return m
}
// WithKeyValueDelimiter sets the delimiter to be used between
// keys and values.
//
// For example, in "k1=v1&k2=v2", the key-value delimiter is "="
func (m Map) WithKeyValueDelimiter(delimiter string) Map {
m.keyValueDelimiter = delimiter
return m
}
// String returns a string representation of the Map flag.
func (m *Map) String() string {
var a []string
for k, v := range m.data {
a = append(a, fmt.Sprintf("%s%s%s", k, m.keyValueDelimiter, v))
}
return strings.Join(a, m.entryDelimiter)
}
// Set parses the provided string according to the delimiters and
// assigns the result to the Map receiver. It returns an error if
// the string is not parseable.
func (m *Map) Set(s string) error {
for _, part := range strings.Split(s, m.entryDelimiter) {
kvs := strings.SplitN(part, m.keyValueDelimiter, 2)
if len(kvs) != 2 {
return fmt.Errorf("error parsing %q", part)
}
m.data[kvs[0]] = kvs[1]
}
return nil
}
// Type returns a string representation of the
// Map type.
func (m *Map) Type() string {
return "mapStringString"
}
// Data returns the underlying golang map storing
// the flag data.
func (m *Map) Data() map[string]string {
return m.data
}

View File

@@ -0,0 +1,115 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"fmt"
"io"
"regexp"
"sort"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/printers"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
var (
backupColumns = []string{"NAME", "STATUS", "CREATED", "EXPIRES", "SELECTOR"}
)
func printBackupList(list *v1.BackupList, w io.Writer, options printers.PrintOptions) error {
sortBackupsByPrefixAndTimestamp(list)
for i := range list.Items {
if err := printBackup(&list.Items[i], w, options); err != nil {
return err
}
}
return nil
}
func sortBackupsByPrefixAndTimestamp(list *v1.BackupList) {
// sort by default alphabetically, but if backups stem from a common schedule
// (detected by the presence of a 14-digit timestamp suffix), then within that
// group, sort by newest to oldest (i.e. prefix ASC, suffix DESC)
timestampSuffix := regexp.MustCompile("-[0-9]{14}$")
sort.Slice(list.Items, func(i, j int) bool {
iSuffixIndex := timestampSuffix.FindStringIndex(list.Items[i].Name)
jSuffixIndex := timestampSuffix.FindStringIndex(list.Items[j].Name)
// one/both don't have a timestamp suffix, so sort alphabetically
if iSuffixIndex == nil || jSuffixIndex == nil {
return list.Items[i].Name < list.Items[j].Name
}
// different prefixes, so sort alphabetically
if list.Items[i].Name[0:iSuffixIndex[0]] != list.Items[j].Name[0:jSuffixIndex[0]] {
return list.Items[i].Name < list.Items[j].Name
}
// same prefixes, so sort based on suffix (desc)
return list.Items[i].Name[iSuffixIndex[0]:] > list.Items[j].Name[jSuffixIndex[0]:]
})
}
func printBackup(backup *v1.Backup, w io.Writer, options printers.PrintOptions) error {
name := printers.FormatResourceName(options.Kind, backup.Name, options.WithKind)
if options.WithNamespace {
if _, err := fmt.Fprintf(w, "%s\t", backup.Namespace); err != nil {
return err
}
}
expiration := backup.Status.Expiration.Time
if expiration.IsZero() && backup.Spec.TTL.Duration > 0 {
expiration = backup.CreationTimestamp.Add(backup.Spec.TTL.Duration)
}
status := backup.Status.Phase
if status == "" {
status = v1.BackupPhaseNew
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s", name, status, backup.CreationTimestamp.Time, humanReadableTimeFromNow(expiration), metav1.FormatLabelSelector(backup.Spec.LabelSelector)); err != nil {
return err
}
if _, err := fmt.Fprint(w, printers.AppendLabels(backup.Labels, options.ColumnLabels)); err != nil {
return err
}
_, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, backup.Labels))
return err
}
func humanReadableTimeFromNow(when time.Time) string {
if when.IsZero() {
return "n/a"
}
now := time.Now()
switch {
case !when.Before(now):
return printers.ShortHumanDuration(when.Sub(now))
default:
return fmt.Sprintf("%s ago", printers.ShortHumanDuration(now.Sub(when)))
}
}

View File

@@ -0,0 +1,89 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
func TestSortBackups(t *testing.T) {
tests := []struct {
name string
backupList *v1.BackupList
expected []v1.Backup
}{
{
name: "non-timestamped backups",
backupList: &v1.BackupList{Items: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "c"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
}},
expected: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "b"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "c"}},
},
},
{
name: "timestamped backups",
backupList: &v1.BackupList{Items: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030405"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030406"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030407"}},
}},
expected: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030407"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030406"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030405"}},
},
},
{
name: "non-timestamped and timestamped backups",
backupList: &v1.BackupList{Items: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030405"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030406"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030407"}},
}},
expected: []v1.Backup{
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "a"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030407"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030406"}},
v1.Backup{ObjectMeta: metav1.ObjectMeta{Name: "schedule-20170102030405"}},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
sortBackupsByPrefixAndTimestamp(test.backupList)
if assert.Equal(t, len(test.backupList.Items), len(test.expected)) {
for i := range test.expected {
assert.Equal(t, test.expected[i].Name, test.backupList.Items[i].Name)
}
}
})
}
}

View File

@@ -0,0 +1,175 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"fmt"
"os"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apimachinery/pkg/api/meta"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/pkg/api"
"k8s.io/kubernetes/pkg/printers"
"github.com/heptio/ark/pkg/cmd/util/flag"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
"github.com/heptio/ark/pkg/util/encode"
)
// BindFlags defines a set of output-specific flags within the provided
// FlagSet.
func BindFlags(flags *pflag.FlagSet) {
flags.StringP("output", "o", "table", "Output display format. For create commands, display the object but do not send it to the server. Valid formats are 'table', 'json', and 'yaml'.")
labelColumns := flag.NewStringArray()
flags.Var(&labelColumns, "label-columns", "a comma-separated list of labels to be displayed as columns")
flags.Bool("show-labels", false, "show labels in the last column")
}
// ClearOutputFlagDefault sets the current and default value
// of the "output" flag to the empty string.
func ClearOutputFlagDefault(cmd *cobra.Command) {
f := cmd.Flag("output")
if f == nil {
return
}
f.DefValue = ""
f.Value.Set("")
}
// GetOutputFlagValue returns the value of the "output" flag
// in the provided command, or the zero value if not present.
func GetOutputFlagValue(cmd *cobra.Command) string {
return flag.GetOptionalStringFlag(cmd, "output")
}
// GetLabelColumnsValues returns the value of the "label-columns" flag
// in the provided command, or the zero value if not present.
func GetLabelColumnsValues(cmd *cobra.Command) []string {
return flag.GetOptionalStringArrayFlag(cmd, "label-columns")
}
// GetShowLabelsValue returns the value of the "show-labels" flag
// in the provided command, or the zero value if not present.
func GetShowLabelsValue(cmd *cobra.Command) bool {
return flag.GetOptionalBoolFlag(cmd, "show-labels")
}
// ValidateFlags returns an error if any of the output-related flags
// were specified with invalid values, or nil otherwise.
func ValidateFlags(cmd *cobra.Command) error {
if err := validateOutputFlag(cmd); err != nil {
return err
}
return nil
}
func validateOutputFlag(cmd *cobra.Command) error {
output := GetOutputFlagValue(cmd)
switch output {
case "", "table", "json", "yaml":
default:
return fmt.Errorf("invalid output format %q - valid values are 'table', 'json', and 'yaml'", output)
}
return nil
}
// PrintWithFormat prints the provided object in the format specified by
// the command's flags.
func PrintWithFormat(c *cobra.Command, obj runtime.Object) (bool, error) {
format := GetOutputFlagValue(c)
if format == "" {
return false, nil
}
switch format {
case "table":
return printTable(c, obj)
case "json", "yaml":
return printEncoded(obj, format)
}
return false, fmt.Errorf("unsupported output format %q; valid values are 'table', 'json', and 'yaml'", format)
}
func printEncoded(obj runtime.Object, format string) (bool, error) {
// assume we're printing obj
toPrint := obj
if meta.IsListType(obj) {
list, _ := meta.ExtractList(obj)
if len(list) == 1 {
// if obj was a list and there was only 1 item, just print that 1 instead of a list
toPrint = list[0]
}
}
encoded, err := encode.Encode(toPrint, format)
if err != nil {
return false, err
}
fmt.Println(string(encoded))
return true, nil
}
func printTable(cmd *cobra.Command, obj runtime.Object) (bool, error) {
printer, err := NewPrinter(cmd)
if err != nil {
return false, err
}
printer.Handler(backupColumns, nil, printBackup)
printer.Handler(backupColumns, nil, printBackupList)
printer.Handler(restoreColumns, nil, printRestore)
printer.Handler(restoreColumns, nil, printRestoreList)
printer.Handler(scheduleColumns, nil, printSchedule)
printer.Handler(scheduleColumns, nil, printScheduleList)
err = printer.PrintObj(obj, os.Stdout)
if err != nil {
return false, err
}
return true, nil
}
// NewPrinter returns a printer for doing human-readable table printing of
// Ark objects.
func NewPrinter(cmd *cobra.Command) (*printers.HumanReadablePrinter, error) {
encoder, err := encode.EncoderFor("json")
if err != nil {
return nil, err
}
options := printers.PrintOptions{
NoHeaders: flag.GetOptionalBoolFlag(cmd, "no-headers"),
ShowLabels: GetShowLabelsValue(cmd),
ColumnLabels: GetLabelColumnsValues(cmd),
}
printer := printers.NewHumanReadablePrinter(
encoder,
scheme.Codecs.UniversalDecoder(api.SchemeGroupVersion),
options,
)
return printer, nil
}

View File

@@ -0,0 +1,74 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"fmt"
"io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/printers"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
var (
restoreColumns = []string{"NAME", "BACKUP", "STATUS", "WARNINGS", "ERRORS", "CREATED", "SELECTOR"}
)
func printRestoreList(list *v1.RestoreList, w io.Writer, options printers.PrintOptions) error {
for i := range list.Items {
if err := printRestore(&list.Items[i], w, options); err != nil {
return err
}
}
return nil
}
func printRestore(restore *v1.Restore, w io.Writer, options printers.PrintOptions) error {
name := printers.FormatResourceName(options.Kind, restore.Name, options.WithKind)
if options.WithNamespace {
if _, err := fmt.Fprintf(w, "%s\t", restore.Namespace); err != nil {
return err
}
}
status := restore.Status.Phase
if status == "" {
status = v1.RestorePhaseNew
}
warnings := len(restore.Status.Warnings.Ark) + len(restore.Status.Warnings.Cluster)
for _, nsWarnings := range restore.Status.Warnings.Namespaces {
warnings += len(nsWarnings)
}
errors := len(restore.Status.Errors.Ark) + len(restore.Status.Errors.Cluster)
for _, e := range restore.Status.Errors.Namespaces {
errors += len(e)
}
if _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%d\t%d\t%s\t%s", name, restore.Spec.BackupName, status, warnings, errors, restore.CreationTimestamp.Time, metav1.FormatLabelSelector(restore.Spec.LabelSelector)); err != nil {
return err
}
if _, err := fmt.Fprint(w, printers.AppendLabels(restore.Labels, options.ColumnLabels)); err != nil {
return err
}
_, err := fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, restore.Labels))
return err
}

View File

@@ -0,0 +1,78 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package output
import (
"fmt"
"io"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/kubernetes/pkg/printers"
"github.com/heptio/ark/pkg/apis/ark/v1"
)
var (
scheduleColumns = []string{"NAME", "STATUS", "CREATED", "SCHEDULE", "BACKUP TTL", "LAST BACKUP", "SELECTOR"}
)
func printScheduleList(list *v1.ScheduleList, w io.Writer, options printers.PrintOptions) error {
for i := range list.Items {
if err := printSchedule(&list.Items[i], w, options); err != nil {
return err
}
}
return nil
}
func printSchedule(schedule *v1.Schedule, w io.Writer, options printers.PrintOptions) error {
name := printers.FormatResourceName(options.Kind, schedule.Name, options.WithKind)
if options.WithNamespace {
if _, err := fmt.Fprintf(w, "%s\t", schedule.Namespace); err != nil {
return err
}
}
status := schedule.Status.Phase
if status == "" {
status = v1.SchedulePhaseNew
}
_, err := fmt.Fprintf(
w,
"%s\t%s\t%s\t%s\t%s\t%s\t%s",
name,
status,
schedule.CreationTimestamp.Time,
schedule.Spec.Schedule,
schedule.Spec.Template.TTL.Duration,
humanReadableTimeFromNow(schedule.Status.LastBackup.Time),
metav1.FormatLabelSelector(schedule.Spec.Template.LabelSelector),
)
if err != nil {
return err
}
if _, err := fmt.Fprint(w, printers.AppendLabels(schedule.Labels, options.ColumnLabels)); err != nil {
return err
}
_, err = fmt.Fprint(w, printers.AppendAllLabels(options.ShowLabels, schedule.Labels))
return err
}

View File

@@ -0,0 +1,38 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package version
import (
"fmt"
"github.com/spf13/cobra"
"github.com/heptio/ark/pkg/buildinfo"
)
func NewCommand() *cobra.Command {
c := &cobra.Command{
Use: "version",
Short: "Print the ark version and associated image",
Run: func(cmd *cobra.Command, args []string) {
fmt.Println(buildinfo.Version)
fmt.Println("Configured docker image:", buildinfo.DockerImage)
},
}
return c
}
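A sketch of how this subcommand would typically be registered on a root command (the root command's name and this package's import path are assumptions):

package main

import (
	"os"

	"github.com/spf13/cobra"

	"github.com/heptio/ark/pkg/cmd/version" // assumed import path for this package
)

func main() {
	root := &cobra.Command{Use: "ark"}
	root.AddCommand(version.NewCommand())
	if err := root.Execute(); err != nil {
		os.Exit(1)
	}
}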

View File

@@ -0,0 +1,343 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"bytes"
"context"
"errors"
"fmt"
"io/ioutil"
"os"
"sync"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/clock"
kuberrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/backup"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/util/collections"
"github.com/heptio/ark/pkg/util/encode"
)
const backupVersion = 1
type backupController struct {
backupper backup.Backupper
backupService cloudprovider.BackupService
bucket string
lister listers.BackupLister
listerSynced cache.InformerSynced
client arkv1client.BackupsGetter
syncHandler func(backupName string) error
queue workqueue.RateLimitingInterface
clock clock.Clock
}
func NewBackupController(
backupInformer informers.BackupInformer,
client arkv1client.BackupsGetter,
backupper backup.Backupper,
backupService cloudprovider.BackupService,
bucket string,
) Interface {
c := &backupController{
backupper: backupper,
backupService: backupService,
bucket: bucket,
lister: backupInformer.Lister(),
listerSynced: backupInformer.Informer().HasSynced,
client: client,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "backup"),
clock: &clock.RealClock{},
}
c.syncHandler = c.processBackup
backupInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
backup := obj.(*api.Backup)
switch backup.Status.Phase {
case "", api.BackupPhaseNew:
// only process new backups
default:
glog.V(4).Infof("Backup %s/%s has phase %s - skipping", backup.Namespace, backup.Name, backup.Status.Phase)
return
}
key, err := cache.MetaNamespaceKeyFunc(backup)
if err != nil {
glog.Errorf("error creating queue key for %#v: %v", backup, err)
return
}
c.queue.Add(key)
},
},
)
return c
}
// Run is a blocking function that runs the specified number of worker goroutines
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (controller *backupController) Run(ctx context.Context, numWorkers int) error {
var wg sync.WaitGroup
defer func() {
glog.Infof("Waiting for workers to finish their work")
controller.queue.ShutDown()
// We have to wait here in the deferred function instead of at the bottom of the function body
// because we have to shut down the queue in order for the workers to shut down gracefully, and
// we want to shut down the queue via defer and not at the end of the body.
wg.Wait()
glog.Infof("All workers have finished")
}()
glog.Info("Starting BackupController")
defer glog.Infof("Shutting down BackupController")
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), controller.listerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(controller.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}
<-ctx.Done()
return nil
}
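Given that contract, a caller drives shutdown purely through context cancellation. A sketch, where ctrl and the worker count are assumptions:

ctx, cancel := context.WithCancel(context.Background())
defer cancel()

errCh := make(chan error, 1)
go func() {
	errCh <- ctrl.Run(ctx, 2) // Run blocks; two workers is an arbitrary choice
}()

// ... on a shutdown signal:
cancel() // closes ctx.Done(); Run shuts the queue down and waits for its workers
if err := <-errCh; err != nil {
	glog.Errorf("backup controller: %v", err)
}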
func (controller *backupController) runWorker() {
// continually take items off the queue (waits if it's
// empty) until we get a shutdown signal from the queue
for controller.processNextWorkItem() {
}
}
func (controller *backupController) processNextWorkItem() bool {
key, quit := controller.queue.Get()
if quit {
return false
}
// always call done on this item, since if it fails we'll add
// it back with rate-limiting below
defer controller.queue.Done(key)
err := controller.syncHandler(key.(string))
if err == nil {
// If you had no error, tell the queue to stop tracking history for your key. This will reset
// things like failure counts for per-item rate limiting.
controller.queue.Forget(key)
return true
}
glog.Errorf("syncHandler error: %v", err)
// we had an error processing the item so add it back
// into the queue for re-processing with rate-limiting
controller.queue.AddRateLimited(key)
return true
}
func (controller *backupController) processBackup(key string) error {
glog.V(4).Infof("processBackup for key %q", key)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error splitting key %q: %v", key, err)
return err
}
glog.V(4).Infof("Getting backup %s", key)
backup, err := controller.lister.Backups(ns).Get(name)
if err != nil {
glog.V(4).Infof("error getting backup %s: %v", key, err)
return err
}
// TODO I think this is now unnecessary. We only initially place
// items with Phase = ("" | New) into the queue. Items will only get
// re-queued if syncHandler returns an error, which will only
// happen if there's an error updating Phase from its initial
// state to something else. So any time it's re-queued it will
// still have its initial state, which we've already confirmed
// is ("" | New)
switch backup.Status.Phase {
case "", api.BackupPhaseNew:
// only process new backups
default:
return nil
}
glog.V(4).Infof("Cloning backup %s", key)
// don't modify items in the cache
backup, err = cloneBackup(backup)
if err != nil {
glog.V(4).Infof("error cloning backup %s: %v", key, err)
return err
}
// set backup version
backup.Status.Version = backupVersion
// included resources defaulting
if len(backup.Spec.IncludedResources) == 0 {
backup.Spec.IncludedResources = []string{"*"}
}
// included namespace defaulting
if len(backup.Spec.IncludedNamespaces) == 0 {
backup.Spec.IncludedNamespaces = []string{"*"}
}
// calculate expiration
if backup.Spec.TTL.Duration > 0 {
backup.Status.Expiration = metav1.NewTime(controller.clock.Now().Add(backup.Spec.TTL.Duration))
}
// validation
if backup.Status.ValidationErrors = controller.getValidationErrors(backup); len(backup.Status.ValidationErrors) > 0 {
backup.Status.Phase = api.BackupPhaseFailedValidation
} else {
backup.Status.Phase = api.BackupPhaseInProgress
}
// update status
updatedBackup, err := controller.client.Backups(ns).Update(backup)
if err != nil {
glog.V(4).Infof("error updating status to %s: %v", backup.Status.Phase, err)
return err
}
backup = updatedBackup
if backup.Status.Phase == api.BackupPhaseFailedValidation {
return nil
}
glog.V(4).Infof("running backup for %s", key)
// execution & upload of backup
if err := controller.runBackup(backup, controller.bucket); err != nil {
glog.V(4).Infof("backup %s failed: %v", key, err)
backup.Status.Phase = api.BackupPhaseFailed
}
glog.V(4).Infof("updating backup %s final status", key)
if _, err = controller.client.Backups(ns).Update(backup); err != nil {
glog.V(4).Infof("error updating backup %s final status: %v", key, err)
}
return nil
}
func cloneBackup(in interface{}) (*api.Backup, error) {
clone, err := scheme.Scheme.DeepCopy(in)
if err != nil {
return nil, err
}
out, ok := clone.(*api.Backup)
if !ok {
return nil, fmt.Errorf("unexpected type: %T", clone)
}
return out, nil
}
func (controller *backupController) getValidationErrors(itm *api.Backup) []string {
var validationErrors []string
for _, err := range collections.ValidateIncludesExcludes(itm.Spec.IncludedResources, itm.Spec.ExcludedResources) {
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded resource lists: %v", err))
}
for _, err := range collections.ValidateIncludesExcludes(itm.Spec.IncludedNamespaces, itm.Spec.ExcludedNamespaces) {
validationErrors = append(validationErrors, fmt.Sprintf("Invalid included/excluded namespace lists: %v", err))
}
return validationErrors
}
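collections.ValidateIncludesExcludes is defined elsewhere in the tree. As a hedged illustration only, a simplified stand-in consistent with the tests below (the exact rules are an assumption):

func validateIncludesExcludes(includes, excludes []string) []error {
	var errs []error
	excluded := make(map[string]struct{}, len(excludes))
	for _, e := range excludes {
		if e == "*" {
			errs = append(errs, errors.New("excludes list cannot contain '*'"))
		}
		excluded[e] = struct{}{}
	}
	for _, i := range includes {
		if _, ok := excluded[i]; ok {
			errs = append(errs, fmt.Errorf("%q is listed as both included and excluded", i))
		}
	}
	return errs
}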
func (controller *backupController) runBackup(backup *api.Backup, bucket string) (err error) {
backupFile, err := ioutil.TempFile("", "")
if err != nil {
return err
}
defer func() {
var errs []error
errs = append(errs, err)
if closeErr := backupFile.Close(); closeErr != nil {
errs = append(errs, closeErr)
}
if removeErr := os.Remove(backupFile.Name()); removeErr != nil {
errs = append(errs, removeErr)
}
err = kuberrs.NewAggregate(errs)
}()
if err := controller.backupper.Backup(backup, backupFile); err != nil {
return err
}
// note: updating this here so the uploaded JSON shows "completed". If
// the upload fails, we'll alter the phase in the calling func.
glog.V(4).Infof("backup %s/%s completed", backup.Namespace, backup.Name)
backup.Status.Phase = api.BackupPhaseCompleted
buf := new(bytes.Buffer)
if err := encode.EncodeTo(backup, "json", buf); err != nil {
return err
}
// re-set the file offset to 0 for reading
_, err = backupFile.Seek(0, 0)
if err != nil {
return err
}
return controller.backupService.UploadBackup(bucket, backup.Name, bytes.NewReader(buf.Bytes()), backupFile)
}
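The deferred cleanup above only reaches the caller because runBackup uses a named return value; kuberrs.NewAggregate drops nil entries and returns nil when every entry is nil. A self-contained sketch of the same pattern:

func withTempFile() (err error) {
	f, err := ioutil.TempFile("", "")
	if err != nil {
		return err
	}
	defer func() {
		errs := []error{err} // the in-flight error; may be nil
		if closeErr := f.Close(); closeErr != nil {
			errs = append(errs, closeErr)
		}
		if removeErr := os.Remove(f.Name()); removeErr != nil {
			errs = append(errs, removeErr)
		}
		err = kuberrs.NewAggregate(errs) // nil entries are dropped; returns nil if all nil
	}()
	// ... write to and read from f ...
	return nil
}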

View File

@@ -0,0 +1,253 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"io"
"testing"
"time"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
core "k8s.io/client-go/testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
. "github.com/heptio/ark/pkg/util/test"
)
type fakeBackupper struct {
mock.Mock
}
func (b *fakeBackupper) Backup(backup *v1.Backup, data io.Writer) error {
args := b.Called(backup, data)
return args.Error(0)
}
func TestProcessBackup(t *testing.T) {
tests := []struct {
name string
key string
expectError bool
expectedIncludes []string
expectedExcludes []string
backup *TestBackup
expectBackup bool
}{
{
name: "bad key",
key: "bad/key/here",
expectError: true,
},
{
name: "lister failed",
key: "heptio-ark/backup1",
expectError: true,
},
{
name: "do not process phase FailedValidation",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailedValidation),
expectBackup: false,
},
{
name: "do not process phase InProgress",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseInProgress),
expectBackup: false,
},
{
name: "do not process phase Completed",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseCompleted),
expectBackup: false,
},
{
name: "do not process phase Failed",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseFailed),
expectBackup: false,
},
{
name: "do not process phase other",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase("arg"),
expectBackup: false,
},
{
name: "invalid included/excluded resources fails validation",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("foo").WithExcludedResources("foo"),
expectBackup: false,
},
{
name: "invalid included/excluded namespaces fails validation",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("foo").WithExcludedNamespaces("foo"),
expectBackup: false,
},
{
name: "make sure specified included and excluded resources are honored",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedResources("i", "j").WithExcludedResources("k", "l"),
expectedIncludes: []string{"i", "j"},
expectedExcludes: []string{"k", "l"},
expectBackup: true,
},
{
name: "if includednamespaces are specified, don't default to *",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithIncludedNamespaces("ns-1"),
expectedIncludes: []string{"*"},
expectBackup: true,
},
{
name: "ttl",
key: "heptio-ark/backup1",
backup: NewTestBackup().WithName("backup1").WithPhase(v1.BackupPhaseNew).WithTTL(10 * time.Minute),
expectedIncludes: []string{"*"},
expectBackup: true,
},
}
// flag.Set("logtostderr", "true")
// flag.Set("v", "4")
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
client := fake.NewSimpleClientset()
backupper := &fakeBackupper{}
cloudBackups := &fakeBackupService{}
sharedInformers := informers.NewSharedInformerFactory(client, 0)
c := NewBackupController(
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
backupper,
cloudBackups,
"bucket",
).(*backupController)
c.clock = clock.NewFakeClock(time.Now())
var expiration time.Time
var expectedNSes []string
if test.backup != nil {
// add directly to the informer's store so the lister can function and so we don't have to
// start the shared informers.
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup.Backup)
if test.backup.Spec.TTL.Duration > 0 {
expiration = c.clock.Now().Add(test.backup.Spec.TTL.Duration)
}
// set up a Backup object to represent what we expect to be passed to backupper.Backup()
copy, err := scheme.Scheme.Copy(test.backup.Backup)
assert.NoError(t, err, "copy error")
backup := copy.(*v1.Backup)
backup.Spec.IncludedResources = test.expectedIncludes
backup.Spec.ExcludedResources = test.expectedExcludes
if test.backup.Spec.IncludedNamespaces == nil {
expectedNSes = []string{"*"}
} else {
expectedNSes = test.backup.Spec.IncludedNamespaces
}
backup.Spec.IncludedNamespaces = expectedNSes
backup.Status.Phase = v1.BackupPhaseInProgress
backup.Status.Expiration.Time = expiration
backup.Status.Version = 1
backupper.On("Backup", backup, mock.Anything).Return(nil)
cloudBackups.On("UploadBackup", "bucket", backup.Name, mock.Anything, mock.Anything).Return(nil)
}
// this is necessary so the Update() call returns the appropriate object
client.PrependReactor("update", "backups", func(action core.Action) (bool, runtime.Object, error) {
obj := action.(core.UpdateAction).GetObject()
// need to deep copy so we can test the backup state for each call to update
copy, err := scheme.Scheme.DeepCopy(obj)
if err != nil {
return false, nil, err
}
ret := copy.(runtime.Object)
return true, ret, nil
})
// method under test
err := c.processBackup(test.key)
if test.expectError {
require.Error(t, err, "processBackup should error")
return
}
require.NoErrorf(t, err, "processBackup unexpected error: %v", err)
if !test.expectBackup {
assert.Empty(t, backupper.Calls)
assert.Empty(t, cloudBackups.Calls)
return
}
expectedActions := []core.Action{
core.NewUpdateAction(
v1.SchemeGroupVersion.WithResource("backups"),
v1.DefaultNamespace,
NewTestBackup().
WithName(test.backup.Name).
WithPhase(v1.BackupPhaseInProgress).
WithIncludedResources(test.expectedIncludes...).
WithExcludedResources(test.expectedExcludes...).
WithIncludedNamespaces(expectedNSes...).
WithTTL(test.backup.Spec.TTL.Duration).
WithExpiration(expiration).
WithVersion(1).
Backup,
),
core.NewUpdateAction(
v1.SchemeGroupVersion.WithResource("backups"),
v1.DefaultNamespace,
NewTestBackup().
WithName(test.backup.Name).
WithPhase(v1.BackupPhaseCompleted).
WithIncludedResources(test.expectedIncludes...).
WithExcludedResources(test.expectedExcludes...).
WithIncludedNamespaces(expectedNSes...).
WithTTL(test.backup.Spec.TTL.Duration).
WithExpiration(expiration).
WithVersion(1).
Backup,
),
}
assert.Equal(t, expectedActions, client.Actions())
})
}
}

View File

@@ -0,0 +1,77 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/heptio/ark/pkg/cloudprovider"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
)
type backupSyncController struct {
client arkv1client.BackupsGetter
backupService cloudprovider.BackupService
bucket string
syncPeriod time.Duration
}
func NewBackupSyncController(client arkv1client.BackupsGetter, backupService cloudprovider.BackupService, bucket string, syncPeriod time.Duration) Interface {
if syncPeriod < time.Minute {
glog.Infof("Backup sync period %v is too short. Setting to 1 minute", syncPeriod)
syncPeriod = time.Minute
}
return &backupSyncController{
client: client,
backupService: backupService,
bucket: bucket,
syncPeriod: syncPeriod,
}
}
// Run is a blocking function that continually runs the object storage -> Ark API
// sync process according to the controller's syncPeriod. It will return when it
// receives on the ctx.Done() channel.
func (c *backupSyncController) Run(ctx context.Context, workers int) error {
glog.Info("Running backup sync controller")
wait.Until(c.run, c.syncPeriod, ctx.Done())
return nil
}
func (c *backupSyncController) run() {
glog.Info("Syncing backups from object storage")
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
glog.Errorf("error listing backups: %v", err)
return
}
glog.Infof("Found %d backups", len(backups))
for _, cloudBackup := range backups {
glog.Infof("Syncing backup %s/%s", cloudBackup.Namespace, cloudBackup.Name)
cloudBackup.ResourceVersion = ""
if _, err := c.client.Backups(cloudBackup.Namespace).Create(cloudBackup); err != nil && !errors.IsAlreadyExists(err) {
glog.Errorf("error syncing backup %s/%s from object storage: %v", cloudBackup.Namespace, cloudBackup.Name, err)
}
}
}

View File

@@ -0,0 +1,93 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"testing"
"time"
"github.com/stretchr/testify/assert"
core "k8s.io/client-go/testing"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
. "github.com/heptio/ark/pkg/util/test"
)
func TestRun(t *testing.T) {
tests := []struct {
name string
cloudBackups map[string][]*api.Backup
backupSvcErr error
}{
{
name: "no cloud backups",
},
{
name: "backup service returns error on GetAllBackups",
cloudBackups: map[string][]*api.Backup{
"nonexistent-bucket": []*api.Backup{
NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
},
},
},
{
name: "normal case",
cloudBackups: map[string][]*api.Backup{
"bucket": []*api.Backup{
NewTestBackup().WithNamespace("ns-1").WithName("backup-1").Backup,
NewTestBackup().WithNamespace("ns-1").WithName("backup-2").Backup,
NewTestBackup().WithNamespace("ns-2").WithName("backup-3").Backup,
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
bs = &fakeBackupService{backupsByBucket: test.cloudBackups}
client = fake.NewSimpleClientset()
)
c := NewBackupSyncController(
client.ArkV1(),
bs,
"bucket",
time.Duration(0),
).(*backupSyncController)
c.run()
expectedActions := make([]core.Action, 0)
// we only expect creates for items within the target bucket
for _, cloudBackup := range test.cloudBackups["bucket"] {
action := core.NewCreateAction(
api.SchemeGroupVersion.WithResource("backups"),
cloudBackup.Namespace,
cloudBackup,
)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
})
}
}

View File

@@ -0,0 +1,149 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"github.com/heptio/ark/pkg/cloudprovider"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
)
// gcController garbage-collects expired backups: their files in object storage,
// their associated volume snapshots, and their Backup API objects.
type gcController struct {
backupService cloudprovider.BackupService
snapshotService cloudprovider.SnapshotService
bucket string
syncPeriod time.Duration
clock clock.Clock
lister listers.BackupLister
listerSynced cache.InformerSynced
client arkv1client.BackupsGetter
}
// NewGCController constructs a new gcController.
func NewGCController(
backupService cloudprovider.BackupService,
snapshotService cloudprovider.SnapshotService,
bucket string,
syncPeriod time.Duration,
backupInformer informers.BackupInformer,
client arkv1client.BackupsGetter,
) Interface {
if syncPeriod < time.Minute {
glog.Infof("GC sync period %v is too short. Setting to 1 minute", syncPeriod)
syncPeriod = time.Minute
}
return &gcController{
backupService: backupService,
snapshotService: snapshotService,
bucket: bucket,
syncPeriod: syncPeriod,
clock: clock.RealClock{},
lister: backupInformer.Lister(),
listerSynced: backupInformer.Informer().HasSynced,
client: client,
}
}
var _ Interface = &gcController{}
// Run is a blocking function that runs a single worker to garbage-collect backups
// from object/block storage and the Ark API. It will return when it receives on the
// ctx.Done() channel.
func (c *gcController) Run(ctx context.Context, workers int) error {
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), c.listerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
wait.Until(c.run, c.syncPeriod, ctx.Done())
return nil
}
func (c *gcController) run() {
c.cleanBackups()
}
// cleanBackups deletes expired backups.
func (c *gcController) cleanBackups() {
backups, err := c.backupService.GetAllBackups(c.bucket)
if err != nil {
glog.Errorf("error getting all backups: %v", err)
return
}
now := c.clock.Now()
glog.Infof("garbage-collecting backups that have expired as of %v", now)
// GC backup files and associated snapshots/API objects. Note that deletion from object
// storage should happen first because otherwise there's a possibility the backup sync
// controller would re-create the API object after deletion.
for _, backup := range backups {
if backup.Status.Expiration.Time.Before(now) {
glog.Infof("Removing backup %s/%s", backup.Namespace, backup.Name)
if err := c.backupService.DeleteBackup(c.bucket, backup.Name); err != nil {
glog.Errorf("error deleting backup %s/%s: %v", backup.Namespace, backup.Name, err)
}
for _, volumeBackup := range backup.Status.VolumeBackups {
glog.Infof("Removing snapshot %s associated with backup %s/%s", volumeBackup.SnapshotID, backup.Namespace, backup.Name)
if err := c.snapshotService.DeleteSnapshot(volumeBackup.SnapshotID); err != nil {
glog.Errorf("error deleting snapshot %v: %v", volumeBackup.SnapshotID, err)
}
}
glog.Infof("Removing backup API object %s/%s", backup.Namespace, backup.Name)
if err := c.client.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s/%s: %v", backup.Namespace, backup.Name, err)
}
} else {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
}
}
// also GC any Backup API objects without files in object storage
apiBackups, err := c.lister.List(labels.NewSelector())
if err != nil {
glog.Errorf("error getting all backup API objects: %v", err)
}
for _, backup := range apiBackups {
if backup.Status.Expiration.Time.Before(now) {
glog.Infof("Removing backup API object %s/%s", backup.Namespace, backup.Name)
if err := c.client.Backups(backup.Namespace).Delete(backup.Name, &metav1.DeleteOptions{}); err != nil {
glog.Errorf("error deleting backup API object %s/%s: %v", backup.Namespace, backup.Name, err)
}
} else {
glog.Infof("Backup %s/%s has not expired yet, skipping", backup.Namespace, backup.Name)
}
}
}

View File

@@ -0,0 +1,318 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"bytes"
"errors"
"io"
"io/ioutil"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/sets"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
. "github.com/heptio/ark/pkg/util/test"
)
type gcTest struct {
name string
bucket string
backups map[string][]*api.Backup
snapshots sets.String
expectedBackupsRemaining map[string]sets.String
expectedSnapshotsRemaining sets.String
}
func TestGarbageCollect(t *testing.T) {
fakeClock := clock.NewFakeClock(time.Now())
tests := []gcTest{
gcTest{
name: "basic-expired",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedBackupsRemaining: make(map[string]sets.String),
expectedSnapshotsRemaining: sets.NewString(),
},
gcTest{
name: "basic-unexpired",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
expectedBackupsRemaining: map[string]sets.String{
"bucket-1": sets.NewString("backup-1"),
},
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2"),
},
gcTest{
name: "one expired, one unexpired",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
WithSnapshot("pv-3", "snapshot-3").
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedBackupsRemaining: map[string]sets.String{
"bucket-1": sets.NewString("backup-2"),
},
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
gcTest{
name: "none expired in target bucket",
bucket: "bucket-2",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
"bucket-2": []*api.Backup{
NewTestBackup().WithName("backup-2").
WithExpiration(fakeClock.Now().Add(1*time.Minute)).
WithSnapshot("pv-3", "snapshot-3").
WithSnapshot("pv-4", "snapshot-4").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedBackupsRemaining: map[string]sets.String{
"bucket-1": sets.NewString("backup-1"),
"bucket-2": sets.NewString("backup-2"),
},
expectedSnapshotsRemaining: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
},
gcTest{
name: "orphan snapshots",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(-1*time.Minute)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2", "snapshot-3", "snapshot-4"),
expectedBackupsRemaining: make(map[string]sets.String),
expectedSnapshotsRemaining: sets.NewString("snapshot-3", "snapshot-4"),
},
}
for _, test := range tests {
backupService := &fakeBackupService{}
snapshotService := &FakeSnapshotService{}
t.Run(test.name, func(t *testing.T) {
backupService.backupsByBucket = make(map[string][]*api.Backup)
for bucket, backups := range test.backups {
data := make([]*api.Backup, 0, len(backups))
for _, backup := range backups {
data = append(data, backup)
}
backupService.backupsByBucket[bucket] = data
}
snapshotService.SnapshotsTaken = test.snapshots
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
)
controller := NewGCController(
backupService,
snapshotService,
test.bucket,
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
).(*gcController)
controller.clock = fakeClock
controller.cleanBackups()
// verify every bucket has the backups we expect
for bucket, backups := range backupService.backupsByBucket {
// if actual and expected are both empty, no further verification needed
if len(backups) == 0 && len(test.expectedBackupsRemaining[bucket]) == 0 {
continue
}
// get all the actual backups remaining in this bucket
backupNames := sets.NewString()
for _, backup := range backupService.backupsByBucket[bucket] {
backupNames.Insert(backup.Name)
}
assert.Equal(t, test.expectedBackupsRemaining[bucket], backupNames)
}
assert.Equal(t, test.expectedSnapshotsRemaining, snapshotService.SnapshotsTaken)
})
}
}
func TestGarbageCollectPicksUpBackupUponExpiration(t *testing.T) {
var (
backupService = &fakeBackupService{}
snapshotService = &FakeSnapshotService{}
fakeClock = clock.NewFakeClock(time.Now())
assert = assert.New(t)
)
scenario := gcTest{
name: "basic-expired",
bucket: "bucket-1",
backups: map[string][]*api.Backup{
"bucket-1": []*api.Backup{
NewTestBackup().WithName("backup-1").
WithExpiration(fakeClock.Now().Add(1*time.Second)).
WithSnapshot("pv-1", "snapshot-1").
WithSnapshot("pv-2", "snapshot-2").
Backup,
},
},
snapshots: sets.NewString("snapshot-1", "snapshot-2"),
}
backupService.backupsByBucket = make(map[string][]*api.Backup)
for bucket, backups := range scenario.backups {
data := make([]*api.Backup, 0, len(backups))
for _, backup := range backups {
data = append(data, backup)
}
backupService.backupsByBucket[bucket] = data
}
snapshotService.SnapshotsTaken = scenario.snapshots
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
)
controller := NewGCController(
backupService,
snapshotService,
scenario.bucket,
1*time.Millisecond,
sharedInformers.Ark().V1().Backups(),
client.ArkV1(),
).(*gcController)
controller.clock = fakeClock
// PASS 1
controller.cleanBackups()
assert.Equal(scenario.backups, backupService.backupsByBucket, "backups should not be garbage-collected yet.")
assert.Equal(scenario.snapshots, snapshotService.SnapshotsTaken, "snapshots should not be garbage-collected yet.")
// PASS 2
fakeClock.Step(1 * time.Minute)
controller.cleanBackups()
assert.Equal(0, len(backupService.backupsByBucket[scenario.bucket]), "backups should have been garbage-collected.")
assert.Equal(0, len(snapshotService.SnapshotsTaken), "snapshots should have been garbage-collected.")
}
type fakeBackupService struct {
backupsByBucket map[string][]*api.Backup
mock.Mock
}
func (s *fakeBackupService) GetAllBackups(bucket string) ([]*api.Backup, error) {
backups, found := s.backupsByBucket[bucket]
if !found {
return nil, errors.New("bucket not found")
}
return backups, nil
}
func (s *fakeBackupService) UploadBackup(bucket, name string, metadata, backup io.ReadSeeker) error {
args := s.Called(bucket, name, metadata, backup)
return args.Error(0)
}
func (s *fakeBackupService) DownloadBackup(bucket, name string) (io.ReadCloser, error) {
return ioutil.NopCloser(bytes.NewReader([]byte("hello world"))), nil
}
func (s *fakeBackupService) DeleteBackup(bucket, backupName string) error {
backups, err := s.GetAllBackups(bucket)
if err != nil {
return err
}
deleteIdx := -1
for i, backup := range backups {
if backup.Name == backupName {
deleteIdx = i
break
}
}
if deleteIdx == -1 {
return errors.New("backup not found")
}
s.backupsByBucket[bucket] = append(s.backupsByBucket[bucket][0:deleteIdx], s.backupsByBucket[bucket][deleteIdx+1:]...)
return nil
}

View File

@@ -0,0 +1,25 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import "context"
// Interface represents a runnable component.
type Interface interface {
// Run runs the component.
Run(ctx context.Context, workers int) error
}
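Each controller in this package satisfies Interface, so a server can start them uniformly. A sketch, where ctx and the ctrl variables are assumptions:

var wg sync.WaitGroup
for _, c := range []Interface{backupCtrl, scheduleCtrl, gcCtrl} { // assumed variables
	wg.Add(1)
	go func(c Interface) {
		defer wg.Done()
		if err := c.Run(ctx, 1); err != nil {
			glog.Errorf("controller exited: %v", err)
		}
	}(c)
}
wg.Wait() // all Runs return once ctx is cancelled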

View File

@@ -0,0 +1,333 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"sync"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/cloudprovider"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
"github.com/heptio/ark/pkg/restore"
)
type restoreController struct {
restoreClient arkv1client.RestoresGetter
backupClient arkv1client.BackupsGetter
restorer restore.Restorer
backupService cloudprovider.BackupService
bucket string
backupLister listers.BackupLister
backupListerSynced cache.InformerSynced
restoreLister listers.RestoreLister
restoreListerSynced cache.InformerSynced
syncHandler func(restoreName string) error
queue workqueue.RateLimitingInterface
}
func NewRestoreController(
restoreInformer informers.RestoreInformer,
restoreClient arkv1client.RestoresGetter,
backupClient arkv1client.BackupsGetter,
restorer restore.Restorer,
backupService cloudprovider.BackupService,
bucket string,
backupInformer informers.BackupInformer,
) Interface {
c := &restoreController{
restoreClient: restoreClient,
backupClient: backupClient,
restorer: restorer,
backupService: backupService,
bucket: bucket,
backupLister: backupInformer.Lister(),
backupListerSynced: backupInformer.Informer().HasSynced,
restoreLister: restoreInformer.Lister(),
restoreListerSynced: restoreInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "restore"),
}
c.syncHandler = c.processRestore
restoreInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
restore := obj.(*api.Restore)
switch restore.Status.Phase {
case "", api.RestorePhaseNew:
// only process new restores
default:
glog.V(4).Infof("Restore %s/%s has phase %s - skipping", restore.Namespace, restore.Name, restore.Status.Phase)
return
}
key, err := cache.MetaNamespaceKeyFunc(restore)
if err != nil {
glog.Errorf("error creating queue key for %#v: %v", restore, err)
return
}
c.queue.Add(key)
},
},
)
return c
}
// Run is a blocking function that runs the specified number of worker goroutines
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (controller *restoreController) Run(ctx context.Context, numWorkers int) error {
var wg sync.WaitGroup
defer func() {
glog.Infof("Waiting for workers to finish their work")
controller.queue.ShutDown()
// We have to wait here in the deferred function instead of at the bottom of the function body
// because we have to shut down the queue in order for the workers to shut down gracefully, and
// we want to shut down the queue via defer and not at the end of the body.
wg.Wait()
glog.Infof("All workers have finished")
}()
glog.Info("Starting RestoreController")
defer glog.Info("Shutting down RestoreController")
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), controller.backupListerSynced, controller.restoreListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(controller.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}
<-ctx.Done()
return nil
}
func (controller *restoreController) runWorker() {
// continually take items off the queue (waits if it's
// empty) until we get a shutdown signal from the queue
for controller.processNextWorkItem() {
}
}
func (controller *restoreController) processNextWorkItem() bool {
key, quit := controller.queue.Get()
if quit {
return false
}
// always call done on this item, since if it fails we'll add
// it back with rate-limiting below
defer controller.queue.Done(key)
err := controller.syncHandler(key.(string))
if err == nil {
// If you had no error, tell the queue to stop tracking history for your key. This will reset
// things like failure counts for per-item rate limiting.
controller.queue.Forget(key)
return true
}
glog.Errorf("syncHandler error: %v", err)
// we had an error processing the item so add it back
// into the queue for re-processing with rate-limiting
controller.queue.AddRateLimited(key)
return true
}
func (controller *restoreController) processRestore(key string) error {
glog.V(4).Infof("processRestore for key %q", key)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error splitting key %q: %v", key, err)
return err
}
glog.V(4).Infof("Getting restore %s", key)
restore, err := controller.restoreLister.Restores(ns).Get(name)
if err != nil {
glog.V(4).Infof("error getting restore %s: %v", key, err)
return err
}
// TODO I think this is now unnecessary. We only initially place
// items with Phase = ("" | New) into the queue. Items will only get
// re-queued if syncHandler returns an error, which will only
// happen if there's an error updating Phase from its initial
// state to something else. So any time it's re-queued it will
// still have its initial state, which we've already confirmed
// is ("" | New)
switch restore.Status.Phase {
case "", api.RestorePhaseNew:
// only process new restores
default:
return nil
}
glog.V(4).Infof("Cloning restore %s", key)
// don't modify items in the cache
restore, err = cloneRestore(restore)
if err != nil {
glog.V(4).Infof("error cloning restore %s: %v", key, err)
return err
}
// validation
if restore.Status.ValidationErrors = controller.getValidationErrors(restore); len(restore.Status.ValidationErrors) > 0 {
restore.Status.Phase = api.RestorePhaseFailedValidation
} else {
restore.Status.Phase = api.RestorePhaseInProgress
}
if len(restore.Spec.Namespaces) == 0 {
restore.Spec.Namespaces = []string{"*"}
}
// update status
updatedRestore, err := controller.restoreClient.Restores(ns).Update(restore)
if err != nil {
glog.V(4).Infof("error updating status to %s: %v", restore.Status.Phase, err)
return err
}
restore = updatedRestore
if restore.Status.Phase == api.RestorePhaseFailedValidation {
return nil
}
glog.V(4).Infof("running restore for %s", key)
// execution & upload of restore
restore.Status.Warnings, restore.Status.Errors = controller.runRestore(restore, controller.bucket)
glog.V(4).Infof("restore %s completed", key)
restore.Status.Phase = api.RestorePhaseCompleted
glog.V(4).Infof("updating restore %s final status", key)
if _, err = controller.restoreClient.Restores(ns).Update(restore); err != nil {
glog.V(4).Infof("error updating restore %s final status: %v", key, err)
}
return nil
}
func cloneRestore(in interface{}) (*api.Restore, error) {
clone, err := scheme.Scheme.DeepCopy(in)
if err != nil {
return nil, err
}
out, ok := clone.(*api.Restore)
if !ok {
return nil, fmt.Errorf("unexpected type: %T", clone)
}
return out, nil
}
func (controller *restoreController) getValidationErrors(itm *api.Restore) []string {
var validationErrors []string
if itm.Spec.BackupName == "" {
validationErrors = append(validationErrors, "BackupName must be non-empty and correspond to the name of a backup in object storage.")
}
return validationErrors
}
func (controller *restoreController) runRestore(restore *api.Restore, bucket string) (warnings, errors api.RestoreResult) {
backup, err := controller.backupLister.Backups(api.DefaultNamespace).Get(restore.Spec.BackupName)
if err != nil {
glog.Errorf("error getting backup: %v", err)
errors.Cluster = append(errors.Cluster, err.Error())
return
}
tmpFile, err := downloadToTempFile(restore.Spec.BackupName, controller.backupService, bucket)
if err != nil {
glog.Errorf("error downloading backup: %v", err)
errors.Cluster = append(errors.Cluster, err.Error())
return
}
defer func() {
if err := tmpFile.Close(); err != nil {
errors.Cluster = append(errors.Cluster, err.Error())
}
if err := os.Remove(tmpFile.Name()); err != nil {
errors.Cluster = append(errors.Cluster, err.Error())
}
}()
return controller.restorer.Restore(restore, backup, tmpFile)
}
func downloadToTempFile(backupName string, backupService cloudprovider.BackupService, bucket string) (*os.File, error) {
readCloser, err := backupService.DownloadBackup(bucket, backupName)
if err != nil {
return nil, err
}
defer readCloser.Close()
file, err := ioutil.TempFile("", backupName)
if err != nil {
return nil, err
}
n, err := io.Copy(file, readCloser)
if err != nil {
return nil, err
}
glog.V(4).Infof("copied %d bytes", n)
if _, err := file.Seek(0, 0); err != nil {
glog.V(4).Infof("error seeking: %v", err)
return nil, err
}
return file, nil
}

View File

@@ -0,0 +1,247 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"errors"
"io"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
. "github.com/heptio/ark/pkg/util/test"
)
func TestProcessRestore(t *testing.T) {
tests := []struct {
name string
restoreKey string
restore *api.Restore
backup *api.Backup
restorerError error
expectedErr bool
expectedRestoreUpdates []*api.Restore
expectedRestorerCall *api.Restore
}{
{
name: "invalid key returns error",
restoreKey: "invalid/key/value",
expectedErr: true,
},
{
name: "missing restore returns error",
restoreKey: "foo/bar",
expectedErr: true,
},
{
name: "restore with phase InProgress does not get processed",
restore: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).Restore,
expectedErr: false,
},
{
name: "restore with phase Completed does not get processed",
restore: NewTestRestore("foo", "bar", api.RestorePhaseCompleted).Restore,
expectedErr: false,
},
{
name: "restore with phase FailedValidation does not get processed",
restore: NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).Restore,
expectedErr: false,
},
{
name: "new restore with empty backup name fails validation",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithRestorableNamespace("ns-1").Restore,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseFailedValidation).
WithRestorableNamespace("ns-1").
WithValidationError("BackupName must be non-empty and correspond to the name of a backup in object storage.").Restore,
},
},
{
name: "restore with non-existent backup name fails",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
NewTestRestore("foo", "bar", api.RestorePhaseCompleted).
WithBackup("backup-1").
WithRestorableNamespace("ns-1").
WithErrors(api.RestoreResult{
Cluster: []string{"backup.ark.heptio.com \"backup-1\" not found"},
}).
Restore,
},
},
{
name: "restorer throwing an error causes the restore to fail",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
backup: NewTestBackup().WithName("backup-1").Backup,
restorerError: errors.New("blarg"),
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
NewTestRestore("foo", "bar", api.RestorePhaseCompleted).
WithBackup("backup-1").
WithRestorableNamespace("ns-1").
WithErrors(api.RestoreResult{
Namespaces: map[string][]string{
"ns-1": {"blarg"},
},
}).Restore,
},
expectedRestorerCall: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
},
{
name: "valid restore gets executed",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
backup: NewTestBackup().WithName("backup-1").Backup,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
NewTestRestore("foo", "bar", api.RestorePhaseCompleted).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
},
expectedRestorerCall: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("ns-1").Restore,
},
{
name: "restore with no restorable namespaces gets defaulted to *",
restore: NewTestRestore("foo", "bar", api.RestorePhaseNew).WithBackup("backup-1").Restore,
backup: NewTestBackup().WithName("backup-1").Backup,
expectedErr: false,
expectedRestoreUpdates: []*api.Restore{
NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("*").Restore,
NewTestRestore("foo", "bar", api.RestorePhaseCompleted).WithBackup("backup-1").WithRestorableNamespace("*").Restore,
},
expectedRestorerCall: NewTestRestore("foo", "bar", api.RestorePhaseInProgress).WithBackup("backup-1").WithRestorableNamespace("*").Restore,
},
}
// flag.Set("logtostderr", "true")
// flag.Set("v", "4")
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
restorer = &fakeRestorer{}
sharedInformers = informers.NewSharedInformerFactory(client, 0)
backupSvc = &fakeBackupService{}
)
c := NewRestoreController(
sharedInformers.Ark().V1().Restores(),
client.ArkV1(),
client.ArkV1(),
restorer,
backupSvc,
"bucket",
sharedInformers.Ark().V1().Backups(),
).(*restoreController)
if test.restore != nil {
sharedInformers.Ark().V1().Restores().Informer().GetStore().Add(test.restore)
// this is necessary so the Update() call returns the appropriate object
client.PrependReactor("update", "restores", func(action core.Action) (bool, runtime.Object, error) {
obj := action.(core.UpdateAction).GetObject()
// need to deep copy so we can test the backup state for each call to update
copy, err := scheme.Scheme.DeepCopy(obj)
if err != nil {
return false, nil, err
}
ret := copy.(runtime.Object)
return true, ret, nil
})
}
if test.backup != nil {
sharedInformers.Ark().V1().Backups().Informer().GetStore().Add(test.backup)
}
var warnings, errors api.RestoreResult
if test.restorerError != nil {
errors.Namespaces = map[string][]string{"ns-1": {test.restorerError.Error()}}
}
restorer.On("Restore", mock.Anything, mock.Anything, mock.Anything).Return(warnings, errors)
var (
key = test.restoreKey
err error
)
if key == "" && test.restore != nil {
key, err = cache.MetaNamespaceKeyFunc(test.restore)
if err != nil {
panic(err)
}
}
err = c.processRestore(key)
assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)
if test.expectedRestoreUpdates != nil {
var expectedActions []core.Action
for _, upd := range test.expectedRestoreUpdates {
action := core.NewUpdateAction(
api.SchemeGroupVersion.WithResource("restores"),
upd.Namespace,
upd)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
}
if test.expectedRestorerCall == nil {
assert.Empty(t, restorer.Calls)
assert.Zero(t, restorer.calledWithArg)
} else {
assert.Equal(t, 1, len(restorer.Calls))
// explicitly capturing the argument passed to Restore myself because
// I want to validate the called arg as of the time of calling, but
// the mock stores the pointer, which gets modified after
assert.Equal(t, *test.expectedRestorerCall, restorer.calledWithArg)
}
})
}
}
type fakeRestorer struct {
mock.Mock
calledWithArg api.Restore
}
func (r *fakeRestorer) Restore(restore *api.Restore, backup *api.Backup, backupReader io.Reader) (api.RestoreResult, api.RestoreResult) {
res := r.Called(restore, backup, backupReader)
r.calledWithArg = *restore
return res.Get(0).(api.RestoreResult), res.Get(1).(api.RestoreResult)
}

View File

@@ -0,0 +1,381 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"context"
"errors"
"fmt"
"sync"
"time"
"github.com/golang/glog"
"github.com/robfig/cron"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
)
type scheduleController struct {
schedulesClient arkv1client.SchedulesGetter
backupsClient arkv1client.BackupsGetter
schedulesLister listers.ScheduleLister
schedulesListerSynced cache.InformerSynced
syncHandler func(scheduleName string) error
queue workqueue.RateLimitingInterface
syncPeriod time.Duration
clock clock.Clock
}
func NewScheduleController(
schedulesClient arkv1client.SchedulesGetter,
backupsClient arkv1client.BackupsGetter,
schedulesInformer informers.ScheduleInformer,
syncPeriod time.Duration,
) *scheduleController {
if syncPeriod < time.Minute {
glog.Infof("Schedule sync period %v is too short. Setting to 1 minute", syncPeriod)
syncPeriod = time.Minute
}
c := &scheduleController{
schedulesClient: schedulesClient,
backupsClient: backupsClient,
schedulesLister: schedulesInformer.Lister(),
schedulesListerSynced: schedulesInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "schedule"),
syncPeriod: syncPeriod,
clock: clock.RealClock{},
}
c.syncHandler = c.processSchedule
schedulesInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
schedule := obj.(*api.Schedule)
switch schedule.Status.Phase {
case "", api.SchedulePhaseNew, api.SchedulePhaseEnabled:
// add to work queue
default:
glog.V(4).Infof("Schedule %s/%s has phase %s - skipping", schedule.Namespace, schedule.Name, schedule.Status.Phase)
return
}
key, err := cache.MetaNamespaceKeyFunc(schedule)
if err != nil {
glog.Errorf("error creating queue key for %#v: %v", schedule, err)
return
}
c.queue.Add(key)
},
},
)
return c
}
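parseCronSchedule is defined later in this file (past this excerpt) and presumably wraps the robfig/cron parser; a hedged sketch of the core call, where the 5-field ParseStandard form is an assumption:

if sched, err := cron.ParseStandard("0 1 * * *"); err != nil { // daily at 01:00
	// an unparseable spec is what pushes a Schedule into SchedulePhaseFailedValidation
} else {
	next := sched.Next(time.Now()) // compared against the last backup time to decide if one is due
	_ = next
}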
// Run is a blocking function that runs the specified number of worker goroutines
// to process items in the work queue. It will return when it receives on the
// ctx.Done() channel.
func (controller *scheduleController) Run(ctx context.Context, numWorkers int) error {
var wg sync.WaitGroup
defer func() {
glog.Infof("Waiting for workers to finish their work")
controller.queue.ShutDown()
// We have to wait here in the deferred function instead of at the bottom of the function body
// because we have to shut down the queue in order for the workers to shut down gracefully, and
// we want to shut down the queue via defer and not at the end of the body.
wg.Wait()
glog.Infof("All workers have finished")
}()
glog.Info("Starting ScheduleController")
defer glog.Info("Shutting down ScheduleController")
glog.Info("Waiting for caches to sync")
if !cache.WaitForCacheSync(ctx.Done(), controller.schedulesListerSynced) {
return errors.New("timed out waiting for caches to sync")
}
glog.Info("Caches are synced")
wg.Add(numWorkers)
for i := 0; i < numWorkers; i++ {
go func() {
wait.Until(controller.runWorker, time.Second, ctx.Done())
wg.Done()
}()
}
go wait.Until(controller.enqueueAllEnabledSchedules, controller.syncPeriod, ctx.Done())
<-ctx.Done()
return nil
}
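// Illustrative usage (not part of the original file): a minimal sketch of how
// a server might construct and run this controller. arkClient and
// sharedInformers are assumed names for this example only.
//
//	ctx, cancel := context.WithCancel(context.Background())
//	defer cancel()
//
//	controller := NewScheduleController(
//		arkClient.ArkV1(),                      // SchedulesGetter
//		arkClient.ArkV1(),                      // BackupsGetter
//		sharedInformers.Ark().V1().Schedules(), // schedule informer
//		time.Minute,                            // sync period (clamped to >= 1 minute)
//	)
//
//	go sharedInformers.Start(ctx.Done())
//	if err := controller.Run(ctx, 2); err != nil { // 2 worker goroutines
//		glog.Fatalf("error running ScheduleController: %v", err)
//	}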
func (controller *scheduleController) enqueueAllEnabledSchedules() {
schedules, err := controller.schedulesLister.Schedules(api.DefaultNamespace).List(labels.NewSelector())
if err != nil {
glog.Errorf("error listing schedules: %v", err)
return
}
for _, schedule := range schedules {
if schedule.Status.Phase != api.SchedulePhaseEnabled {
continue
}
key, err := cache.MetaNamespaceKeyFunc(schedule)
if err != nil {
glog.Errorf("error creating queue key for %#v: %v", schedule, err)
continue
}
controller.queue.Add(key)
}
}
func (controller *scheduleController) runWorker() {
// continually take items off the queue (waiting if it's empty) until we
// get a shutdown signal from the queue
for controller.processNextWorkItem() {
}
}
func (controller *scheduleController) processNextWorkItem() bool {
key, quit := controller.queue.Get()
if quit {
return false
}
// always call Done on this item, since if processing fails we'll re-add
// it below with rate-limiting
defer controller.queue.Done(key)
err := controller.syncHandler(key.(string))
if err == nil {
// If there was no error, tell the queue to stop tracking history for this key.
// This resets things like failure counts used for per-item rate limiting.
controller.queue.Forget(key)
return true
}
glog.Errorf("syncHandler error: %v", err)
// we had an error processing the item so add it back
// into the queue for re-processing with rate-limiting
controller.queue.AddRateLimited(key)
return true
}
func (controller *scheduleController) processSchedule(key string) error {
glog.V(4).Infof("processSchedule for key %q", key)
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
glog.V(4).Infof("error splitting key %q: %v", key, err)
return err
}
glog.V(4).Infof("Getting schedule %s", key)
schedule, err := controller.schedulesLister.Schedules(ns).Get(name)
if err != nil {
// schedule no longer exists
if apierrors.IsNotFound(err) {
glog.V(4).Infof("schedule %s not found: %v", key, err)
return nil
}
glog.V(4).Infof("error getting schedule %s: %v", key, err)
return err
}
switch schedule.Status.Phase {
case "", api.SchedulePhaseNew, api.SchedulePhaseEnabled:
// valid phase for processing
default:
return nil
}
glog.V(4).Infof("Cloning schedule %s", key)
// don't modify items in the cache
schedule, err = cloneSchedule(schedule)
if err != nil {
glog.V(4).Infof("error cloning schedule %s: %v", key, err)
return err
}
// validation - even if the item is Enabled, we can't trust it,
// so re-validate
currentPhase := schedule.Status.Phase
cronSchedule, errs := parseCronSchedule(schedule)
if len(errs) > 0 {
schedule.Status.Phase = api.SchedulePhaseFailedValidation
schedule.Status.ValidationErrors = errs
} else {
schedule.Status.Phase = api.SchedulePhaseEnabled
}
// update status if it's changed
if currentPhase != schedule.Status.Phase {
updatedSchedule, err := controller.schedulesClient.Schedules(ns).Update(schedule)
if err != nil {
glog.V(4).Infof("error updating status to %s: %v", schedule.Status.Phase, err)
return err
}
schedule = updatedSchedule
}
if schedule.Status.Phase != api.SchedulePhaseEnabled {
return nil
}
// check for the schedule being due to run, and submit a Backup if so
if err := controller.submitBackupIfDue(schedule, cronSchedule); err != nil {
glog.V(4).Infof("error processing Schedule %v/%v: err=%v", schedule.Namespace, schedule.Name, err)
return err
}
return nil
}
func cloneSchedule(in interface{}) (*api.Schedule, error) {
clone, err := scheme.Scheme.DeepCopy(in)
if err != nil {
return nil, err
}
out, ok := clone.(*api.Schedule)
if !ok {
return nil, fmt.Errorf("unexpected type: %T", clone)
}
return out, nil
}
func parseCronSchedule(itm *api.Schedule) (cron.Schedule, []string) {
var validationErrors []string
var schedule cron.Schedule
// cron.Parse panics if schedule is empty
if len(itm.Spec.Schedule) == 0 {
validationErrors = append(validationErrors, "Schedule must be a non-empty valid Cron expression")
return nil, validationErrors
}
// wrap cron.Parse in a recover() because it panics on an empty string, and
// it's possible that it panics in other scenarios as well.
func() {
defer func() {
if r := recover(); r != nil {
glog.V(4).Infof("panic parsing schedule %v/%v, cron schedule=%v: %v", itm.Namespace, itm.Name, itm.Spec.Schedule, r)
validationErrors = append(validationErrors, fmt.Sprintf("invalid schedule: %v", r))
}
}()
if res, err := cron.Parse(itm.Spec.Schedule); err != nil {
glog.V(4).Infof("error parsing schedule %v/%v, cron schedule=%v: %v", itm.Namespace, itm.Name, itm.Spec.Schedule, err)
validationErrors = append(validationErrors, fmt.Sprintf("invalid schedule: %v", err))
} else {
schedule = res
}
}()
if len(validationErrors) > 0 {
return nil, validationErrors
}
return schedule, nil
}
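// For reference (illustrative, not part of the original file): the cron
// library accepts descriptors such as "@every 5m" or "@daily" in addition to
// standard cron field expressions, so:
//
//	parseCronSchedule(&api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}})
//	// -> a cron.Schedule and no validation errors
//
//	parseCronSchedule(&api.Schedule{Spec: api.ScheduleSpec{Schedule: ""}})
//	// -> nil and ["Schedule must be a non-empty valid Cron expression"]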
func (controller *scheduleController) submitBackupIfDue(item *api.Schedule, cronSchedule cron.Schedule) error {
now := controller.clock.Now()
isDue, nextRunTime := getNextRunTime(item, cronSchedule, now)
if !isDue {
glog.Infof("Next run time for %v/%v is %v, skipping...", item.Namespace, item.Name, nextRunTime)
return nil
}
// Don't attempt to "catch up" if there are any missed or failed runs - simply
// trigger a Backup if it's time.
//
// It might also make sense in the future to explicitly check for currently-running
// backups so that we don't overlap runs (for disk snapshots in particular, this can
// lead to performance issues).
glog.Infof("Next run time for %v/%v is %v, submitting Backup...", item.Namespace, item.Name, nextRunTime)
backup := getBackup(item, now)
if _, err := controller.backupsClient.Backups(backup.Namespace).Create(backup); err != nil {
glog.V(4).Infof("error creating Backup: %v", err)
return err
}
schedule, err := cloneSchedule(item)
if err != nil {
glog.V(4).Infof("error cloning Schedule %v/%v: %v", item.Namespace, item.Name, err)
return err
}
schedule.Status.LastBackup = metav1.NewTime(now)
if _, err := controller.schedulesClient.Schedules(schedule.Namespace).Update(schedule); err != nil {
glog.V(4).Infof("error updating LastBackup for Schedule %v/%v: %v", schedule.Namespace, schedule.Name, err)
return err
}
return nil
}
func getNextRunTime(schedule *api.Schedule, cronSchedule cron.Schedule, asOf time.Time) (bool, time.Time) {
// get the latest run time (if the schedule hasn't run yet, this will be the zero value which will trigger
// an immediate backup)
lastBackupTime := schedule.Status.LastBackup.Time
nextRunTime := cronSchedule.Next(lastBackupTime)
return asOf.After(nextRunTime), nextRunTime
}
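// Worked example (illustrative, not part of the original file): for an
// "@every 5m" schedule whose LastBackup is 12:00:00, cron reports the next
// run as 12:05:00, so the schedule is due once asOf is strictly after that
// instant. For a schedule that has never run, LastBackup is the zero time,
// the computed next run time is far in the past relative to any real clock,
// and the schedule is due immediately.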
func getBackup(item *api.Schedule, timestamp time.Time) *api.Backup {
backup := &api.Backup{
Spec: item.Spec.Template,
ObjectMeta: metav1.ObjectMeta{
Namespace: item.Namespace,
Name: fmt.Sprintf("%s-%s", item.Name, timestamp.Format("20060102150405")),
Labels: map[string]string{
"ark-schedule": item.Name,
},
},
}
return backup
}
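// For example (illustrative): a Schedule named "bar" in namespace "foo" that
// fires at 2017-07-25 09:15:00 yields a Backup named "bar-20170725091500" in
// namespace "foo", labeled "ark-schedule": "bar", whose Spec is the
// Schedule's backup template.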

View File

@@ -0,0 +1,372 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package controller
import (
"testing"
"time"
"github.com/robfig/cron"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/client-go/kubernetes/scheme"
core "k8s.io/client-go/testing"
"k8s.io/client-go/tools/cache"
api "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/fake"
informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
. "github.com/heptio/ark/pkg/util/test"
)
func TestProcessSchedule(t *testing.T) {
tests := []struct {
name string
scheduleKey string
schedule *api.Schedule
fakeClockTime string
expectedErr bool
expectedSchedulePhaseUpdate *api.Schedule
expectedScheduleLastBackupUpdate *api.Schedule
expectedBackupCreate *api.Backup
}{
{
name: "invalid key returns error",
scheduleKey: "invalid/key/value",
expectedErr: true,
},
{
name: "missing schedule returns early without an error",
scheduleKey: "foo/bar",
expectedErr: false,
},
{
name: "schedule with phase FailedValidation does not get processed",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).Schedule,
expectedErr: false,
},
{
name: "schedule with phase New gets validated and failed if invalid",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).Schedule,
expectedErr: false,
expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
},
{
name: "schedule with phase <blank> gets validated and failed if invalid",
schedule: NewTestSchedule("ns", "name").Schedule,
expectedErr: false,
expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
},
{
name: "schedule with phase Enabled gets re-validated and failed if invalid",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).Schedule,
expectedErr: false,
expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseFailedValidation).
WithValidationError("Schedule must be a non-empty valid Cron expression").Schedule,
},
{
name: "schedule with phase New gets validated and triggers a backup",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseNew).WithCronSchedule("@every 5m").Schedule,
fakeClockTime: "2017-01-01 12:00:00",
expectedErr: false,
expectedSchedulePhaseUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule,
expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
},
{
name: "schedule with phase Enabled gets re-validated and triggers a backup if valid",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).WithCronSchedule("@every 5m").Schedule,
fakeClockTime: "2017-01-01 12:00:00",
expectedErr: false,
expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
},
{
name: "schedule that's already run gets LastBackup updated",
schedule: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
WithCronSchedule("@every 5m").WithLastBackupTime("2000-01-01 00:00:00").Schedule,
fakeClockTime: "2017-01-01 12:00:00",
expectedErr: false,
expectedBackupCreate: NewTestBackup().WithNamespace("ns").WithName("name-20170101120000").WithLabel("ark-schedule", "name").Backup,
expectedScheduleLastBackupUpdate: NewTestSchedule("ns", "name").WithPhase(api.SchedulePhaseEnabled).
WithCronSchedule("@every 5m").WithLastBackupTime("2017-01-01 12:00:00").Schedule,
},
}
// flag.Set("logtostderr", "true")
// flag.Set("v", "4")
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
var (
client = fake.NewSimpleClientset()
sharedInformers = informers.NewSharedInformerFactory(client, 0)
)
c := NewScheduleController(
client.ArkV1(),
client.ArkV1(),
sharedInformers.Ark().V1().Schedules(),
time.Duration(0),
)
var (
testTime time.Time
err error
)
if test.fakeClockTime != "" {
testTime, err = time.Parse("2006-01-02 15:04:05", test.fakeClockTime)
require.NoErrorf(t, err, "unable to parse test.fakeClockTime: %v", err)
}
c.clock = clock.NewFakeClock(testTime)
if test.schedule != nil {
sharedInformers.Ark().V1().Schedules().Informer().GetStore().Add(test.schedule)
// this is necessary so the Update() call returns the appropriate object
client.PrependReactor("update", "schedules", func(action core.Action) (bool, runtime.Object, error) {
obj := action.(core.UpdateAction).GetObject()
// need to deep copy so we can test the schedule state for each call to update
copy, err := scheme.Scheme.DeepCopy(obj)
if err != nil {
return false, nil, err
}
ret := copy.(runtime.Object)
return true, ret, nil
})
}
key := test.scheduleKey
if key == "" && test.schedule != nil {
key, err = cache.MetaNamespaceKeyFunc(test.schedule)
require.NoErrorf(t, err, "error getting key from test.schedule: %v", err)
}
err = c.processSchedule(key)
assert.Equal(t, test.expectedErr, err != nil, "got error %v", err)
expectedActions := make([]core.Action, 0)
if upd := test.expectedSchedulePhaseUpdate; upd != nil {
action := core.NewUpdateAction(
api.SchemeGroupVersion.WithResource("schedules"),
upd.Namespace,
upd)
expectedActions = append(expectedActions, action)
}
if created := test.expectedBackupCreate; created != nil {
action := core.NewCreateAction(
api.SchemeGroupVersion.WithResource("backups"),
created.Namespace,
created)
expectedActions = append(expectedActions, action)
}
if upd := test.expectedScheduleLastBackupUpdate; upd != nil {
action := core.NewUpdateAction(
api.SchemeGroupVersion.WithResource("schedules"),
upd.Namespace,
upd)
expectedActions = append(expectedActions, action)
}
assert.Equal(t, expectedActions, client.Actions())
})
}
}
func TestGetNextRunTime(t *testing.T) {
tests := []struct {
name string
schedule *api.Schedule
lastRanOffset string
expectedDue bool
expectedNextRunTimeOffset string
}{
{
name: "first run",
schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}},
expectedDue: true,
expectedNextRunTimeOffset: "5m",
},
{
name: "just ran",
schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}},
lastRanOffset: "0s",
expectedDue: false,
expectedNextRunTimeOffset: "5m",
},
{
name: "almost but not quite time to run",
schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}},
lastRanOffset: "4m59s",
expectedDue: false,
expectedNextRunTimeOffset: "5m",
},
{
name: "time to run again",
schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}},
lastRanOffset: "5m",
expectedDue: true,
expectedNextRunTimeOffset: "5m",
},
{
name: "several runs missed",
schedule: &api.Schedule{Spec: api.ScheduleSpec{Schedule: "@every 5m"}},
lastRanOffset: "5h",
expectedDue: true,
expectedNextRunTimeOffset: "5m",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
cronSchedule, err := cron.Parse(test.schedule.Spec.Schedule)
require.NoErrorf(t, err, "unable to parse test.schedule.Spec.Schedule: %v", err)
testClock := clock.NewFakeClock(time.Now())
if test.lastRanOffset != "" {
offsetDuration, err := time.ParseDuration(test.lastRanOffset)
require.NoErrorf(t, err, "unable to parse test.lastRanOffset: %v", err)
test.schedule.Status.LastBackup = metav1.Time{Time: testClock.Now().Add(-offsetDuration)}
}
nextRunTimeOffset, err := time.ParseDuration(test.expectedNextRunTimeOffset)
if err != nil {
panic(err)
}
expectedNextRunTime := test.schedule.Status.LastBackup.Add(nextRunTimeOffset)
due, nextRunTime := getNextRunTime(test.schedule, cronSchedule, testClock.Now())
assert.Equal(t, test.expectedDue, due)
// Ignore diffs of under a second; the cron library does some rounding.
assert.WithinDuration(t, expectedNextRunTime, nextRunTime, time.Second)
})
}
}
func TestGetBackup(t *testing.T) {
tests := []struct {
name string
schedule *api.Schedule
testClockTime string
expectedBackup *api.Backup
}{
{
name: "ensure name is formatted correctly (AM time)",
schedule: &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: api.ScheduleSpec{
Template: api.BackupSpec{},
},
},
testClockTime: "2017-07-25 09:15:00",
expectedBackup: &api.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar-20170725091500",
},
Spec: api.BackupSpec{},
},
},
{
name: "ensure name is formatted correctly (PM time)",
schedule: &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: api.ScheduleSpec{
Template: api.BackupSpec{},
},
},
testClockTime: "2017-07-25 14:15:00",
expectedBackup: &api.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar-20170725141500",
},
Spec: api.BackupSpec{},
},
},
{
name: "ensure schedule backup template is copied",
schedule: &api.Schedule{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: api.ScheduleSpec{
Template: api.BackupSpec{
IncludedNamespaces: []string{"ns-1", "ns-2"},
ExcludedNamespaces: []string{"ns-3"},
IncludedResources: []string{"foo", "bar"},
ExcludedResources: []string{"baz"},
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
SnapshotVolumes: true,
TTL: metav1.Duration{Duration: time.Duration(300)},
},
},
},
testClockTime: "2017-07-25 09:15:00",
expectedBackup: &api.Backup{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar-20170725091500",
},
Spec: api.BackupSpec{
IncludedNamespaces: []string{"ns-1", "ns-2"},
ExcludedNamespaces: []string{"ns-3"},
IncludedResources: []string{"foo", "bar"},
ExcludedResources: []string{"baz"},
LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"label": "value"}},
SnapshotVolumes: true,
TTL: metav1.Duration{Duration: time.Duration(300)},
},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testTime, err := time.Parse("2006-01-02 15:04:05", test.testClockTime)
require.NoErrorf(t, err, "unable to parse test.testClockTime: %v", err)
backup := getBackup(test.schedule, clock.NewFakeClock(testTime).Now())
assert.Equal(t, test.expectedBackup.Namespace, backup.Namespace)
assert.Equal(t, test.expectedBackup.Name, backup.Name)
assert.Equal(t, test.expectedBackup.Spec, backup.Spec)
})
}
}

141
pkg/discovery/helper.go Normal file
View File

@@ -0,0 +1,141 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"sort"
"sync"
kcmdutil "github.com/heptio/ark/third_party/kubernetes/pkg/kubectl/cmd/util"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/pkg/api"
)
// Helper exposes functions for interacting with the Kubernetes discovery
// API.
type Helper interface {
// Mapper gets a RESTMapper for the current set of resources retrieved
// from discovery.
Mapper() meta.RESTMapper
// Resources gets the current set of resources retrieved from discovery
// that Ark is able to back up.
Resources() []*metav1.APIResourceList
// Refresh pulls an updated set of resources that Ark is able to back up
// from the discovery API.
Refresh() error
}
type helper struct {
discoveryClient discovery.DiscoveryInterface
// lock guards mapper and resources
lock sync.RWMutex
mapper meta.RESTMapper
resources []*metav1.APIResourceList
}
var _ Helper = &helper{}
func NewHelper(discoveryClient discovery.DiscoveryInterface) (Helper, error) {
h := &helper{
discoveryClient: discoveryClient,
}
if err := h.Refresh(); err != nil {
return nil, err
}
return h, nil
}
func (h *helper) Refresh() error {
h.lock.Lock()
defer h.lock.Unlock()
groupResources, err := discovery.GetAPIGroupResources(h.discoveryClient)
if err != nil {
return err
}
mapper := discovery.NewRESTMapper(groupResources, dynamic.VersionInterfaces)
shortcutExpander, err := kcmdutil.NewShortcutExpander(mapper, h.discoveryClient)
if err != nil {
return err
}
h.mapper = shortcutExpander
preferredResources, err := h.discoveryClient.ServerPreferredResources()
if err != nil {
return err
}
h.resources = discovery.FilteredBy(
discovery.ResourcePredicateFunc(func(groupVersion string, r *metav1.APIResource) bool {
if groupVersion == api.SchemeGroupVersion.String() {
return false
}
return discovery.SupportsAllVerbs{Verbs: []string{"list", "create"}}.Match(groupVersion, r)
}),
preferredResources,
)
sortResources(h.resources)
return nil
}
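// Illustrative usage (not part of the original file): since resources can be
// added to or removed from a cluster over time, a caller might refresh the
// helper periodically. wait.Until, glog, and ctx are assumptions here; this
// file does not reference them.
//
//	go wait.Until(func() {
//		if err := discoveryHelper.Refresh(); err != nil {
//			glog.Errorf("error refreshing discovery: %v", err)
//		}
//	}, 5*time.Minute, ctx.Done())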
// sortResources sorts resources by moving the extensions group to the end of
// the slice. The order of all the other resources is preserved.
func sortResources(resources []*metav1.APIResourceList) {
sort.SliceStable(resources, func(i, j int) bool {
left := resources[i]
leftGV, _ := schema.ParseGroupVersion(left.GroupVersion)
// not checking error because it should be impossible to fail to parse data coming from the
// apiserver
if leftGV.Group == "extensions" {
// always sort extensions at the bottom by saying left is "greater"
return false
}
right := resources[j]
rightGV, _ := schema.ParseGroupVersion(right.GroupVersion)
// not checking error because it should be impossible to fail to parse data coming from the
// apiserver
if rightGV.Group == "extensions" {
// always sort extensions at the bottom by saying left is "less"
return true
}
return i < j
})
}
func (h *helper) Mapper() meta.RESTMapper {
h.lock.RLock()
defer h.lock.RUnlock()
return h.mapper
}
func (h *helper) Resources() []*metav1.APIResourceList {
h.lock.RLock()
defer h.lock.RUnlock()
return h.resources
}

View File

@@ -0,0 +1,86 @@
/*
Copyright 2017 Heptio Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package discovery
import (
"testing"
"github.com/stretchr/testify/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestSortResources(t *testing.T) {
tests := []struct {
name string
resources []*metav1.APIResourceList
expected []*metav1.APIResourceList
}{
{
name: "no resources",
},
{
name: "no extensions, order is preserved",
resources: []*metav1.APIResourceList{
{GroupVersion: "v1"},
{GroupVersion: "groupC/v1"},
{GroupVersion: "groupA/v1"},
{GroupVersion: "groupB/v1"},
},
expected: []*metav1.APIResourceList{
{GroupVersion: "v1"},
{GroupVersion: "groupC/v1"},
{GroupVersion: "groupA/v1"},
{GroupVersion: "groupB/v1"},
},
},
{
name: "extensions moves to end, order is preserved",
resources: []*metav1.APIResourceList{
{GroupVersion: "extensions/v1beta1"},
{GroupVersion: "v1"},
{GroupVersion: "groupC/v1"},
{GroupVersion: "groupA/v1"},
{GroupVersion: "groupB/v1"},
{GroupVersion: "apps/v1beta1"},
},
expected: []*metav1.APIResourceList{
{GroupVersion: "v1"},
{GroupVersion: "groupC/v1"},
{GroupVersion: "groupA/v1"},
{GroupVersion: "groupB/v1"},
{GroupVersion: "apps/v1beta1"},
{GroupVersion: "extensions/v1beta1"},
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Logf("before")
for _, r := range test.resources {
t.Logf(r.GroupVersion)
}
sortResources(test.resources)
t.Logf("after")
for _, r := range test.resources {
t.Logf(r.GroupVersion)
}
assert.Equal(t, test.expected, test.resources)
})
}
}

View File

@@ -0,0 +1,88 @@
package clientset
import (
glog "github.com/golang/glog"
arkv1 "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
ArkV1() arkv1.ArkV1Interface
// Deprecated: please explicitly pick a version if possible.
Ark() arkv1.ArkV1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
*arkv1.ArkV1Client
}
// ArkV1 retrieves the ArkV1Client
func (c *Clientset) ArkV1() arkv1.ArkV1Interface {
if c == nil {
return nil
}
return c.ArkV1Client
}
// Deprecated: Ark retrieves the default version of ArkClient.
// Please explicitly pick a version.
func (c *Clientset) Ark() arkv1.ArkV1Interface {
if c == nil {
return nil
}
return c.ArkV1Client
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.ArkV1Client, err = arkv1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
glog.Errorf("failed to create the DiscoveryClient: %v", err)
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.ArkV1Client = arkv1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.ArkV1Client = arkv1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}
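// Illustrative usage (not part of the original file): building a Clientset
// from a kubeconfig. clientcmd, metav1, and kubeconfigPath are assumptions
// for this sketch and are not referenced by this file.
//
//	config, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
//	if err != nil {
//		glog.Fatalf("error building rest.Config: %v", err)
//	}
//	client, err := clientset.NewForConfig(config)
//	if err != nil {
//		glog.Fatalf("error building Clientset: %v", err)
//	}
//	// "heptio-ark" is an example namespace
//	backups, err := client.ArkV1().Backups("heptio-ark").List(metav1.ListOptions{})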

View File

@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.
// This package has the automatically generated clientset.
package clientset

View File

@@ -0,0 +1,55 @@
package fake
import (
clientset "github.com/heptio/ark/pkg/generated/clientset"
arkv1 "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
fakearkv1 "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
fakePtr := testing.Fake{}
fakePtr.AddReactor("*", "*", testing.ObjectReaction(o))
fakePtr.AddWatchReactor("*", testing.DefaultWatchReactor(watch.NewFake(), nil))
return &Clientset{fakePtr}
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return &fakediscovery.FakeDiscovery{Fake: &c.Fake}
}
var _ clientset.Interface = &Clientset{}
// ArkV1 retrieves the ArkV1Client
func (c *Clientset) ArkV1() arkv1.ArkV1Interface {
return &fakearkv1.FakeArkV1{Fake: &c.Fake}
}
// Ark retrieves the ArkV1Client
func (c *Clientset) Ark() arkv1.ArkV1Interface {
return &fakearkv1.FakeArkV1{Fake: &c.Fake}
}
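// Illustrative usage (not part of the original file): seeding the fake
// clientset with an object and reading it back. arkapi and metav1 stand for
// github.com/heptio/ark/pkg/apis/ark/v1 and
// k8s.io/apimachinery/pkg/apis/meta/v1; neither alias is used in this file.
//
//	client := NewSimpleClientset(&arkapi.Backup{
//		ObjectMeta: metav1.ObjectMeta{Namespace: "ns", Name: "backup-1"},
//	})
//	backup, err := client.ArkV1().Backups("ns").Get("backup-1", metav1.GetOptions{})
//	// client.Actions() now records the get, which tests can assert on.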

View File

@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.
// This package has the automatically generated fake clientset.
package fake

View File

@@ -0,0 +1,37 @@
package fake
import (
arkv1 "github.com/heptio/ark/pkg/apis/ark/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
arkv1.AddToScheme(scheme)
}

View File

@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.
// This package contains the scheme of the automatically generated clientset.
package scheme

View File

@@ -0,0 +1,37 @@
package scheme
import (
arkv1 "github.com/heptio/ark/pkg/apis/ark/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
AddToScheme(Scheme)
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
func AddToScheme(scheme *runtime.Scheme) {
arkv1.AddToScheme(scheme)
}

View File

@@ -0,0 +1,87 @@
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
"github.com/heptio/ark/pkg/generated/clientset/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
)
type ArkV1Interface interface {
RESTClient() rest.Interface
BackupsGetter
ConfigsGetter
RestoresGetter
SchedulesGetter
}
// ArkV1Client is used to interact with features provided by the ark.heptio.com group.
type ArkV1Client struct {
restClient rest.Interface
}
func (c *ArkV1Client) Backups(namespace string) BackupInterface {
return newBackups(c, namespace)
}
func (c *ArkV1Client) Configs(namespace string) ConfigInterface {
return newConfigs(c, namespace)
}
func (c *ArkV1Client) Restores(namespace string) RestoreInterface {
return newRestores(c, namespace)
}
func (c *ArkV1Client) Schedules(namespace string) ScheduleInterface {
return newSchedules(c, namespace)
}
// NewForConfig creates a new ArkV1Client for the given config.
func NewForConfig(c *rest.Config) (*ArkV1Client, error) {
config := *c
if err := setConfigDefaults(&config); err != nil {
return nil, err
}
client, err := rest.RESTClientFor(&config)
if err != nil {
return nil, err
}
return &ArkV1Client{client}, nil
}
// NewForConfigOrDie creates a new ArkV1Client for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *ArkV1Client {
client, err := NewForConfig(c)
if err != nil {
panic(err)
}
return client
}
// New creates a new ArkV1Client for the given RESTClient.
func New(c rest.Interface) *ArkV1Client {
return &ArkV1Client{c}
}
func setConfigDefaults(config *rest.Config) error {
gv := v1.SchemeGroupVersion
config.GroupVersion = &gv
config.APIPath = "/apis"
config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs}
if config.UserAgent == "" {
config.UserAgent = rest.DefaultKubernetesUserAgent()
}
return nil
}
// RESTClient returns a RESTClient that is used to communicate
// with the API server by this client implementation.
func (c *ArkV1Client) RESTClient() rest.Interface {
if c == nil {
return nil
}
return c.restClient
}

View File

@@ -0,0 +1,156 @@
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// BackupsGetter has a method to return a BackupInterface.
// A group's client should implement this interface.
type BackupsGetter interface {
Backups(namespace string) BackupInterface
}
// BackupInterface has methods to work with Backup resources.
type BackupInterface interface {
Create(*v1.Backup) (*v1.Backup, error)
Update(*v1.Backup) (*v1.Backup, error)
UpdateStatus(*v1.Backup) (*v1.Backup, error)
Delete(name string, options *meta_v1.DeleteOptions) error
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1.Backup, error)
List(opts meta_v1.ListOptions) (*v1.BackupList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Backup, err error)
BackupExpansion
}
// backups implements BackupInterface
type backups struct {
client rest.Interface
ns string
}
// newBackups returns a Backups
func newBackups(c *ArkV1Client, namespace string) *backups {
return &backups{
client: c.RESTClient(),
ns: namespace,
}
}
// Create takes the representation of a backup and creates it. Returns the server's representation of the backup, and an error, if there is any.
func (c *backups) Create(backup *v1.Backup) (result *v1.Backup, err error) {
result = &v1.Backup{}
err = c.client.Post().
Namespace(c.ns).
Resource("backups").
Body(backup).
Do().
Into(result)
return
}
// Update takes the representation of a backup and updates it. Returns the server's representation of the backup, and an error, if there is any.
func (c *backups) Update(backup *v1.Backup) (result *v1.Backup, err error) {
result = &v1.Backup{}
err = c.client.Put().
Namespace(c.ns).
Resource("backups").
Name(backup.Name).
Body(backup).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
func (c *backups) UpdateStatus(backup *v1.Backup) (result *v1.Backup, err error) {
result = &v1.Backup{}
err = c.client.Put().
Namespace(c.ns).
Resource("backups").
Name(backup.Name).
SubResource("status").
Body(backup).
Do().
Into(result)
return
}
// Delete takes name of the backup and deletes it. Returns an error if one occurs.
func (c *backups) Delete(name string, options *meta_v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("backups").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *backups) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("backups").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the backup, and returns the corresponding backup object, and an error if there is any.
func (c *backups) Get(name string, options meta_v1.GetOptions) (result *v1.Backup, err error) {
result = &v1.Backup{}
err = c.client.Get().
Namespace(c.ns).
Resource("backups").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Backups that match those selectors.
func (c *backups) List(opts meta_v1.ListOptions) (result *v1.BackupList, err error) {
result = &v1.BackupList{}
err = c.client.Get().
Namespace(c.ns).
Resource("backups").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested backups.
func (c *backups) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("backups").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched backup.
func (c *backups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Backup, err error) {
result = &v1.Backup{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("backups").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
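// For reference (illustrative): with the "/apis" APIPath and the
// ark.heptio.com/v1 group-version set in setConfigDefaults, the request
// builders above translate to REST calls such as:
//
//	Create -> POST   /apis/ark.heptio.com/v1/namespaces/{ns}/backups
//	Get    -> GET    /apis/ark.heptio.com/v1/namespaces/{ns}/backups/{name}
//	Delete -> DELETE /apis/ark.heptio.com/v1/namespaces/{ns}/backups/{name}
//	Watch  -> GET    /apis/ark.heptio.com/v1/namespaces/{ns}/backups?watch=true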

View File

@@ -0,0 +1,139 @@
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// ConfigsGetter has a method to return a ConfigInterface.
// A group's client should implement this interface.
type ConfigsGetter interface {
Configs(namespace string) ConfigInterface
}
// ConfigInterface has methods to work with Config resources.
type ConfigInterface interface {
Create(*v1.Config) (*v1.Config, error)
Update(*v1.Config) (*v1.Config, error)
Delete(name string, options *meta_v1.DeleteOptions) error
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1.Config, error)
List(opts meta_v1.ListOptions) (*v1.ConfigList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Config, err error)
ConfigExpansion
}
// configs implements ConfigInterface
type configs struct {
client rest.Interface
ns string
}
// newConfigs returns a Configs
func newConfigs(c *ArkV1Client, namespace string) *configs {
return &configs{
client: c.RESTClient(),
ns: namespace,
}
}
// Create takes the representation of a config and creates it. Returns the server's representation of the config, and an error, if there is any.
func (c *configs) Create(config *v1.Config) (result *v1.Config, err error) {
result = &v1.Config{}
err = c.client.Post().
Namespace(c.ns).
Resource("configs").
Body(config).
Do().
Into(result)
return
}
// Update takes the representation of a config and updates it. Returns the server's representation of the config, and an error, if there is any.
func (c *configs) Update(config *v1.Config) (result *v1.Config, err error) {
result = &v1.Config{}
err = c.client.Put().
Namespace(c.ns).
Resource("configs").
Name(config.Name).
Body(config).
Do().
Into(result)
return
}
// Delete takes name of the config and deletes it. Returns an error if one occurs.
func (c *configs) Delete(name string, options *meta_v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("configs").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *configs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("configs").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the config, and returns the corresponding config object, and an error if there is any.
func (c *configs) Get(name string, options meta_v1.GetOptions) (result *v1.Config, err error) {
result = &v1.Config{}
err = c.client.Get().
Namespace(c.ns).
Resource("configs").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Configs that match those selectors.
func (c *configs) List(opts meta_v1.ListOptions) (result *v1.ConfigList, err error) {
result = &v1.ConfigList{}
err = c.client.Get().
Namespace(c.ns).
Resource("configs").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested configs.
func (c *configs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("configs").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched config.
func (c *configs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Config, err error) {
result = &v1.Config{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("configs").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}

View File

@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.
// This package has the automatically generated typed clients.
package v1

View File

@@ -0,0 +1,4 @@
// This package is generated by client-gen with custom arguments.
// Package fake has the automatically generated clients.
package fake

View File

@@ -0,0 +1,34 @@
package fake
import (
v1 "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
type FakeArkV1 struct {
*testing.Fake
}
func (c *FakeArkV1) Backups(namespace string) v1.BackupInterface {
return &FakeBackups{c, namespace}
}
func (c *FakeArkV1) Configs(namespace string) v1.ConfigInterface {
return &FakeConfigs{c, namespace}
}
func (c *FakeArkV1) Restores(namespace string) v1.RestoreInterface {
return &FakeRestores{c, namespace}
}
func (c *FakeArkV1) Schedules(namespace string) v1.ScheduleInterface {
return &FakeSchedules{c, namespace}
}
// RESTClient returns a RESTClient that is used to communicate
// with the API server by this client implementation.
func (c *FakeArkV1) RESTClient() rest.Interface {
var ret *rest.RESTClient
return ret
}

View File

@@ -0,0 +1,114 @@
package fake
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeBackups implements BackupInterface
type FakeBackups struct {
Fake *FakeArkV1
ns string
}
var backupsResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "backups"}
var backupsKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "Backup"}
func (c *FakeBackups) Create(backup *v1.Backup) (result *v1.Backup, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(backupsResource, c.ns, backup), &v1.Backup{})
if obj == nil {
return nil, err
}
return obj.(*v1.Backup), err
}
func (c *FakeBackups) Update(backup *v1.Backup) (result *v1.Backup, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(backupsResource, c.ns, backup), &v1.Backup{})
if obj == nil {
return nil, err
}
return obj.(*v1.Backup), err
}
func (c *FakeBackups) UpdateStatus(backup *v1.Backup) (*v1.Backup, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(backupsResource, "status", c.ns, backup), &v1.Backup{})
if obj == nil {
return nil, err
}
return obj.(*v1.Backup), err
}
func (c *FakeBackups) Delete(name string, options *meta_v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(backupsResource, c.ns, name), &v1.Backup{})
return err
}
func (c *FakeBackups) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(backupsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1.BackupList{})
return err
}
func (c *FakeBackups) Get(name string, options meta_v1.GetOptions) (result *v1.Backup, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(backupsResource, c.ns, name), &v1.Backup{})
if obj == nil {
return nil, err
}
return obj.(*v1.Backup), err
}
func (c *FakeBackups) List(opts meta_v1.ListOptions) (result *v1.BackupList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(backupsResource, backupsKind, c.ns, opts), &v1.BackupList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1.BackupList{}
for _, item := range obj.(*v1.BackupList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested backups.
func (c *FakeBackups) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(backupsResource, c.ns, opts))
}
// Patch applies the patch and returns the patched backup.
func (c *FakeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Backup, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(backupsResource, c.ns, name, data, subresources...), &v1.Backup{})
if obj == nil {
return nil, err
}
return obj.(*v1.Backup), err
}

View File

@@ -0,0 +1,104 @@
package fake
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeConfigs implements ConfigInterface
type FakeConfigs struct {
Fake *FakeArkV1
ns string
}
var configsResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "configs"}
var configsKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "Config"}
func (c *FakeConfigs) Create(config *v1.Config) (result *v1.Config, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(configsResource, c.ns, config), &v1.Config{})
if obj == nil {
return nil, err
}
return obj.(*v1.Config), err
}
func (c *FakeConfigs) Update(config *v1.Config) (result *v1.Config, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(configsResource, c.ns, config), &v1.Config{})
if obj == nil {
return nil, err
}
return obj.(*v1.Config), err
}
func (c *FakeConfigs) Delete(name string, options *meta_v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(configsResource, c.ns, name), &v1.Config{})
return err
}
func (c *FakeConfigs) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(configsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1.ConfigList{})
return err
}
func (c *FakeConfigs) Get(name string, options meta_v1.GetOptions) (result *v1.Config, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(configsResource, c.ns, name), &v1.Config{})
if obj == nil {
return nil, err
}
return obj.(*v1.Config), err
}
func (c *FakeConfigs) List(opts meta_v1.ListOptions) (result *v1.ConfigList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(configsResource, configsKind, c.ns, opts), &v1.ConfigList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1.ConfigList{}
for _, item := range obj.(*v1.ConfigList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested configs.
func (c *FakeConfigs) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(configsResource, c.ns, opts))
}
// Patch applies the patch and returns the patched config.
func (c *FakeConfigs) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Config, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(configsResource, c.ns, name, data, subresources...), &v1.Config{})
if obj == nil {
return nil, err
}
return obj.(*v1.Config), err
}

View File

@@ -0,0 +1,114 @@
package fake
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeRestores implements RestoreInterface
type FakeRestores struct {
Fake *FakeArkV1
ns string
}
var restoresResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "restores"}
var restoresKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "Restore"}
func (c *FakeRestores) Create(restore *v1.Restore) (result *v1.Restore, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(restoresResource, c.ns, restore), &v1.Restore{})
if obj == nil {
return nil, err
}
return obj.(*v1.Restore), err
}
func (c *FakeRestores) Update(restore *v1.Restore) (result *v1.Restore, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(restoresResource, c.ns, restore), &v1.Restore{})
if obj == nil {
return nil, err
}
return obj.(*v1.Restore), err
}
func (c *FakeRestores) UpdateStatus(restore *v1.Restore) (*v1.Restore, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(restoresResource, "status", c.ns, restore), &v1.Restore{})
if obj == nil {
return nil, err
}
return obj.(*v1.Restore), err
}
func (c *FakeRestores) Delete(name string, options *meta_v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(restoresResource, c.ns, name), &v1.Restore{})
return err
}
func (c *FakeRestores) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(restoresResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1.RestoreList{})
return err
}
func (c *FakeRestores) Get(name string, options meta_v1.GetOptions) (result *v1.Restore, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(restoresResource, c.ns, name), &v1.Restore{})
if obj == nil {
return nil, err
}
return obj.(*v1.Restore), err
}
func (c *FakeRestores) List(opts meta_v1.ListOptions) (result *v1.RestoreList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(restoresResource, restoresKind, c.ns, opts), &v1.RestoreList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1.RestoreList{}
for _, item := range obj.(*v1.RestoreList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested restores.
func (c *FakeRestores) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(restoresResource, c.ns, opts))
}
// Patch applies the patch and returns the patched restore.
func (c *FakeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Restore, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(restoresResource, c.ns, name, data, subresources...), &v1.Restore{})
if obj == nil {
return nil, err
}
return obj.(*v1.Restore), err
}

View File

@@ -0,0 +1,114 @@
package fake
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeSchedules implements ScheduleInterface
type FakeSchedules struct {
Fake *FakeArkV1
ns string
}
var schedulesResource = schema.GroupVersionResource{Group: "ark.heptio.com", Version: "v1", Resource: "schedules"}
var schedulesKind = schema.GroupVersionKind{Group: "ark.heptio.com", Version: "v1", Kind: "Schedule"}
func (c *FakeSchedules) Create(schedule *v1.Schedule) (result *v1.Schedule, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(schedulesResource, c.ns, schedule), &v1.Schedule{})
if obj == nil {
return nil, err
}
return obj.(*v1.Schedule), err
}
func (c *FakeSchedules) Update(schedule *v1.Schedule) (result *v1.Schedule, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(schedulesResource, c.ns, schedule), &v1.Schedule{})
if obj == nil {
return nil, err
}
return obj.(*v1.Schedule), err
}
func (c *FakeSchedules) UpdateStatus(schedule *v1.Schedule) (*v1.Schedule, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(schedulesResource, "status", c.ns, schedule), &v1.Schedule{})
if obj == nil {
return nil, err
}
return obj.(*v1.Schedule), err
}
func (c *FakeSchedules) Delete(name string, options *meta_v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(schedulesResource, c.ns, name), &v1.Schedule{})
return err
}
func (c *FakeSchedules) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(schedulesResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &v1.ScheduleList{})
return err
}
func (c *FakeSchedules) Get(name string, options meta_v1.GetOptions) (result *v1.Schedule, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(schedulesResource, c.ns, name), &v1.Schedule{})
if obj == nil {
return nil, err
}
return obj.(*v1.Schedule), err
}
func (c *FakeSchedules) List(opts meta_v1.ListOptions) (result *v1.ScheduleList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(schedulesResource, schedulesKind, c.ns, opts), &v1.ScheduleList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &v1.ScheduleList{}
for _, item := range obj.(*v1.ScheduleList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested schedules.
func (c *FakeSchedules) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(schedulesResource, c.ns, opts))
}
// Patch applies the patch and returns the patched schedule.
func (c *FakeSchedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(schedulesResource, c.ns, name, data, subresources...), &v1.Schedule{})
if obj == nil {
return nil, err
}
return obj.(*v1.Schedule), err
}

View File

@@ -0,0 +1,9 @@
package v1
type BackupExpansion interface{}
type ConfigExpansion interface{}
type RestoreExpansion interface{}
type ScheduleExpansion interface{}

View File

@@ -0,0 +1,156 @@
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// RestoresGetter has a method to return a RestoreInterface.
// A group's client should implement this interface.
type RestoresGetter interface {
Restores(namespace string) RestoreInterface
}
// RestoreInterface has methods to work with Restore resources.
type RestoreInterface interface {
Create(*v1.Restore) (*v1.Restore, error)
Update(*v1.Restore) (*v1.Restore, error)
UpdateStatus(*v1.Restore) (*v1.Restore, error)
Delete(name string, options *meta_v1.DeleteOptions) error
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1.Restore, error)
List(opts meta_v1.ListOptions) (*v1.RestoreList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Restore, err error)
RestoreExpansion
}
// restores implements RestoreInterface
type restores struct {
client rest.Interface
ns string
}
// newRestores returns a Restores
func newRestores(c *ArkV1Client, namespace string) *restores {
return &restores{
client: c.RESTClient(),
ns: namespace,
}
}
// Create takes the representation of a restore and creates it. Returns the server's representation of the restore, and an error, if there is any.
func (c *restores) Create(restore *v1.Restore) (result *v1.Restore, err error) {
result = &v1.Restore{}
err = c.client.Post().
Namespace(c.ns).
Resource("restores").
Body(restore).
Do().
Into(result)
return
}
// Update takes the representation of a restore and updates it. Returns the server's representation of the restore, and an error, if there is any.
func (c *restores) Update(restore *v1.Restore) (result *v1.Restore, err error) {
result = &v1.Restore{}
err = c.client.Put().
Namespace(c.ns).
Resource("restores").
Name(restore.Name).
Body(restore).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
func (c *restores) UpdateStatus(restore *v1.Restore) (result *v1.Restore, err error) {
result = &v1.Restore{}
err = c.client.Put().
Namespace(c.ns).
Resource("restores").
Name(restore.Name).
SubResource("status").
Body(restore).
Do().
Into(result)
return
}
// Delete takes name of the restore and deletes it. Returns an error if one occurs.
func (c *restores) Delete(name string, options *meta_v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("restores").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *restores) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("restores").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the restore, and returns the corresponding restore object, and an error if there is any.
func (c *restores) Get(name string, options meta_v1.GetOptions) (result *v1.Restore, err error) {
result = &v1.Restore{}
err = c.client.Get().
Namespace(c.ns).
Resource("restores").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Restores that match those selectors.
func (c *restores) List(opts meta_v1.ListOptions) (result *v1.RestoreList, err error) {
result = &v1.RestoreList{}
err = c.client.Get().
Namespace(c.ns).
Resource("restores").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested restores.
func (c *restores) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("restores").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched restore.
func (c *restores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Restore, err error) {
result = &v1.Restore{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("restores").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
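Putting the typed client to work against a live cluster — a minimal sketch, assuming the clientset package exposes the conventional client-gen constructor NewForConfig; the kubeconfig path and namespace are placeholders:
package main
import (
	"fmt"
	clientset "github.com/heptio/ark/pkg/generated/clientset"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)
func main() {
	// Build a rest.Config from a kubeconfig file (placeholder path).
	config, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
	if err != nil {
		panic(err)
	}
	// NewForConfig is the standard client-gen constructor (assumed here).
	client, err := clientset.NewForConfig(config)
	if err != nil {
		panic(err)
	}
	// Each call maps onto the REST verbs built above (GET /restores, etc.).
	restores, err := client.ArkV1().Restores("heptio-ark").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, r := range restores.Items {
		fmt.Println(r.Name)
	}
}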

View File

@@ -0,0 +1,156 @@
package v1
import (
v1 "github.com/heptio/ark/pkg/apis/ark/v1"
scheme "github.com/heptio/ark/pkg/generated/clientset/scheme"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// SchedulesGetter has a method to return a ScheduleInterface.
// A group's client should implement this interface.
type SchedulesGetter interface {
Schedules(namespace string) ScheduleInterface
}
// ScheduleInterface has methods to work with Schedule resources.
type ScheduleInterface interface {
Create(*v1.Schedule) (*v1.Schedule, error)
Update(*v1.Schedule) (*v1.Schedule, error)
UpdateStatus(*v1.Schedule) (*v1.Schedule, error)
Delete(name string, options *meta_v1.DeleteOptions) error
DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error
Get(name string, options meta_v1.GetOptions) (*v1.Schedule, error)
List(opts meta_v1.ListOptions) (*v1.ScheduleList, error)
Watch(opts meta_v1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error)
ScheduleExpansion
}
// schedules implements ScheduleInterface
type schedules struct {
client rest.Interface
ns string
}
// newSchedules returns a Schedules
func newSchedules(c *ArkV1Client, namespace string) *schedules {
return &schedules{
client: c.RESTClient(),
ns: namespace,
}
}
// Create takes the representation of a schedule and creates it. Returns the server's representation of the schedule, and an error, if there is any.
func (c *schedules) Create(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Post().
Namespace(c.ns).
Resource("schedules").
Body(schedule).
Do().
Into(result)
return
}
// Update takes the representation of a schedule and updates it. Returns the server's representation of the schedule, and an error, if there is any.
func (c *schedules) Update(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Put().
Namespace(c.ns).
Resource("schedules").
Name(schedule.Name).
Body(schedule).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclientstatus=false comment above the type to avoid generating UpdateStatus().
func (c *schedules) UpdateStatus(schedule *v1.Schedule) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Put().
Namespace(c.ns).
Resource("schedules").
Name(schedule.Name).
SubResource("status").
Body(schedule).
Do().
Into(result)
return
}
// Delete takes name of the schedule and deletes it. Returns an error if one occurs.
func (c *schedules) Delete(name string, options *meta_v1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("schedules").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *schedules) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&listOptions, scheme.ParameterCodec).
Body(options).
Do().
Error()
}
// Get takes name of the schedule, and returns the corresponding schedule object, and an error if there is any.
func (c *schedules) Get(name string, options meta_v1.GetOptions) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Get().
Namespace(c.ns).
Resource("schedules").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of Schedules that match those selectors.
func (c *schedules) List(opts meta_v1.ListOptions) (result *v1.ScheduleList, err error) {
result = &v1.ScheduleList{}
err = c.client.Get().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&opts, scheme.ParameterCodec).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested schedules.
func (c *schedules) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("schedules").
VersionedParams(&opts, scheme.ParameterCodec).
Watch()
}
// Patch applies the patch and returns the patched schedule.
func (c *schedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Schedule, err error) {
result = &v1.Schedule{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("schedules").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
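The Watch implementation above just flips opts.Watch and issues a GET; consuming the resulting stream looks like this — a hedged sketch, assuming the conventional typed-client import path for the ScheduleInterface defined above:
package example
import (
	"fmt"
	arkv1client "github.com/heptio/ark/pkg/generated/clientset/typed/ark/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// watchSchedules drains the event stream until the server closes the watch.
func watchSchedules(schedules arkv1client.ScheduleInterface) error {
	w, err := schedules.Watch(metav1.ListOptions{})
	if err != nil {
		return err
	}
	defer w.Stop()
	for ev := range w.ResultChan() {
		// ev.Type is Added, Modified, or Deleted; ev.Object is the Schedule.
		fmt.Println(ev.Type)
	}
	return nil
}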

View File

@@ -0,0 +1,28 @@
// This file was automatically generated by informer-gen
package ark
import (
v1 "github.com/heptio/ark/pkg/generated/informers/externalversions/ark/v1"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
)
// Interface provides access to each of this group's versions.
type Interface interface {
// V1 provides access to shared informers for resources in V1.
V1() v1.Interface
}
type group struct {
internalinterfaces.SharedInformerFactory
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory) Interface {
return &group{f}
}
// V1 returns a new v1.Interface.
func (g *group) V1() v1.Interface {
return v1.New(g.SharedInformerFactory)
}

View File

@@ -0,0 +1,52 @@
// This file was automatically generated by informer-gen
package v1
import (
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
clientset "github.com/heptio/ark/pkg/generated/clientset"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
time "time"
)
// BackupInformer provides access to a shared informer and lister for
// Backups.
type BackupInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.BackupLister
}
type backupInformer struct {
factory internalinterfaces.SharedInformerFactory
}
func newBackupInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
return client.ArkV1().Backups(meta_v1.NamespaceAll).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
return client.ArkV1().Backups(meta_v1.NamespaceAll).Watch(options)
},
},
&ark_v1.Backup{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
func (f *backupInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&ark_v1.Backup{}, newBackupInformer)
}
func (f *backupInformer) Lister() v1.BackupLister {
return v1.NewBackupLister(f.Informer().GetIndexer())
}
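The informer above is meant to be obtained through the generated SharedInformerFactory rather than constructed directly. A hedged sketch of the typical wiring — NewSharedInformerFactory and the Ark() group accessor are the names informer-gen conventionally emits, assumed here:
package example
import (
	"time"
	clientset "github.com/heptio/ark/pkg/generated/clientset"
	informers "github.com/heptio/ark/pkg/generated/informers/externalversions"
	"k8s.io/client-go/tools/cache"
)
func startBackupInformer(client clientset.Interface, stopCh <-chan struct{}) {
	factory := informers.NewSharedInformerFactory(client, 30*time.Second)
	informer := factory.Ark().V1().Backups().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc: func(obj interface{}) {
			// react to a new Backup landing in the shared cache
		},
	})
	// Start launches every informer requested so far; WaitForCacheSync
	// blocks until the initial List has been delivered to the index.
	factory.Start(stopCh)
	cache.WaitForCacheSync(stopCh, informer.HasSynced)
}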

View File

@@ -0,0 +1,52 @@
// This file was automatically generated by informer-gen
package v1
import (
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
clientset "github.com/heptio/ark/pkg/generated/clientset"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
time "time"
)
// ConfigInformer provides access to a shared informer and lister for
// Configs.
type ConfigInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.ConfigLister
}
type configInformer struct {
factory internalinterfaces.SharedInformerFactory
}
func newConfigInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
return client.ArkV1().Configs(meta_v1.NamespaceAll).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
return client.ArkV1().Configs(meta_v1.NamespaceAll).Watch(options)
},
},
&ark_v1.Config{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
func (f *configInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&ark_v1.Config{}, newConfigInformer)
}
func (f *configInformer) Lister() v1.ConfigLister {
return v1.NewConfigLister(f.Informer().GetIndexer())
}

View File

@@ -0,0 +1,48 @@
// This file was automatically generated by informer-gen
package v1
import (
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
)
// Interface provides access to all the informers in this group version.
type Interface interface {
// Backups returns a BackupInformer.
Backups() BackupInformer
// Configs returns a ConfigInformer.
Configs() ConfigInformer
// Restores returns a RestoreInformer.
Restores() RestoreInformer
// Schedules returns a ScheduleInformer.
Schedules() ScheduleInformer
}
type version struct {
internalinterfaces.SharedInformerFactory
}
// New returns a new Interface.
func New(f internalinterfaces.SharedInformerFactory) Interface {
return &version{f}
}
// Backups returns a BackupInformer.
func (v *version) Backups() BackupInformer {
return &backupInformer{factory: v.SharedInformerFactory}
}
// Configs returns a ConfigInformer.
func (v *version) Configs() ConfigInformer {
return &configInformer{factory: v.SharedInformerFactory}
}
// Restores returns a RestoreInformer.
func (v *version) Restores() RestoreInformer {
return &restoreInformer{factory: v.SharedInformerFactory}
}
// Schedules returns a ScheduleInformer.
func (v *version) Schedules() ScheduleInformer {
return &scheduleInformer{factory: v.SharedInformerFactory}
}

View File

@@ -0,0 +1,52 @@
// This file was automatically generated by informer-gen
package v1
import (
ark_v1 "github.com/heptio/ark/pkg/apis/ark/v1"
clientset "github.com/heptio/ark/pkg/generated/clientset"
internalinterfaces "github.com/heptio/ark/pkg/generated/informers/externalversions/internalinterfaces"
v1 "github.com/heptio/ark/pkg/generated/listers/ark/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
time "time"
)
// RestoreInformer provides access to a shared informer and lister for
// Restores.
type RestoreInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.RestoreLister
}
type restoreInformer struct {
factory internalinterfaces.SharedInformerFactory
}
func newRestoreInformer(client clientset.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
sharedIndexInformer := cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options meta_v1.ListOptions) (runtime.Object, error) {
return client.ArkV1().Restores(meta_v1.NamespaceAll).List(options)
},
WatchFunc: func(options meta_v1.ListOptions) (watch.Interface, error) {
return client.ArkV1().Restores(meta_v1.NamespaceAll).Watch(options)
},
},
&ark_v1.Restore{},
resyncPeriod,
cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
)
return sharedIndexInformer
}
func (f *restoreInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&ark_v1.Restore{}, newRestoreInformer)
}
func (f *restoreInformer) Lister() v1.RestoreLister {
return v1.NewRestoreLister(f.Informer().GetIndexer())
}
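Once an informer has synced, reads should go through its Lister(), which serves from the local index instead of the API server. A minimal sketch, assuming the conventional lister-gen import path and method set for the RestoreLister returned above:
package example
import (
	listers "github.com/heptio/ark/pkg/generated/listers/ark/v1"
	"k8s.io/apimachinery/pkg/labels"
)
// namesOf reads Restores straight from the informer's local cache; no
// API round-trip happens here.
func namesOf(lister listers.RestoreLister, namespace string) ([]string, error) {
	restores, err := lister.Restores(namespace).List(labels.Everything())
	if err != nil {
		return nil, err
	}
	var names []string
	for _, r := range restores {
		names = append(names, r.Name)
	}
	return names, nil
}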

Some files were not shown because too many files have changed in this diff.