Merge pull request #3889 from zubron/release-1.6.1

Add cherry-pick commits and changelog for v1.6.1
Bridget McErlean
2021-06-22 09:15:31 -04:00 (committed by GitHub)
36 changed files with 1035 additions and 31895 deletions


@@ -1,3 +1,25 @@
## v1.6.1
### 2021-06-21
### Download
https://github.com/vmware-tanzu/velero/releases/tag/v1.6.1
### Container Image
`velero/velero:v1.6.1`
### Documentation
https://velero.io/docs/v1.6/
### Upgrading
https://velero.io/docs/v1.6/upgrade-to-1.6/
### All Changes
* Fix CR restore regression introduced in 1.6 restore progress. (#3845, @sseago)
* Skip the restore of volumes that originally came from a projected volume when using restic. (#3877, @zubron)
* Skip backing up projected volumes when using restic (#3866, @alaypatel07)
* 🐛 Fix plugin name derivation from image name (#3711, @ashish-amarnath)
## v1.6.0
### 2021-04-12

go.mod

@@ -20,7 +20,7 @@ require (
github.com/hashicorp/go-plugin v0.0.0-20190610192547-a1bc61569a26
github.com/joho/godotenv v1.3.0
github.com/kubernetes-csi/external-snapshotter/client/v4 v4.0.0
github.com/onsi/ginkgo v1.15.2
github.com/onsi/ginkgo v1.16.4
github.com/onsi/gomega v1.10.2
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1

go.sum

@@ -214,6 +214,7 @@ github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gobuffalo/flect v0.2.2 h1:PAVD7sp0KOdfswjAw9BpLCU9hXo7wFSzgpQ+zNeks/A=
github.com/gobuffalo/flect v0.2.2/go.mod h1:vmkQwuZYhN5Pc4ljYQZzP+1sq+NEkK+lh20jmEmX3jc=
github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y=
@@ -417,7 +418,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.1/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -431,8 +431,8 @@ github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.15.2 h1:l77YT15o814C2qVL47NOyjV/6RbaP7kKdrvZnxQ3Org=
github.com/onsi/ginkgo v1.15.2/go.mod h1:Dd6YFfwBW84ETqqtL0CPyPXillHgY6XhQH3uuCCTr/o=
github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
@@ -628,7 +628,6 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200707034311-ab3426394381 h1:VXak5I6aEWmAXeQjA+QSZzlgNrpq9mjcfDemuexIKsU=
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
@@ -685,7 +684,6 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210112080510-489259a85091 h1:DMyOG0U+gKfu8JZzg2UQe9MeaC1X+xQWlAKcRnjxjCw=
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -732,13 +730,11 @@ golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054 h1:HHeAlu5H9b71C+Fx0K+1dGgVFN1DM1/wz4aoGOA5qS8=
golang.org/x/tools v0.0.0-20200616133436-c1934b75d054/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e h1:4nW4NLDYnU28ojHaHO8OVxFHk/aQ33U01a9cjED+pzE=
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -819,7 +815,6 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -45,7 +45,7 @@ func ForPluginContainer(image string, pullPolicy corev1api.PullPolicy) *Containe
// getName returns the 'name' component of a docker
// image that includes the entire string except the registry name, and transforms the combined
// string into a DNS-1123 compatible name.
// string into a RFC-1123 compatible name.
func getName(image string) string {
slashIndex := strings.Index(image, "/")
slashCount := 0
@@ -67,7 +67,14 @@ func getName(image string) string {
end = colonIndex
}
return strings.Replace(image[start:end], "/", "-", -1) // this makes it DNS-1123 compatible
// https://github.com/distribution/distribution/blob/main/docs/spec/api.md#overview
// valid repository names match the regex [a-z0-9]+(?:[._-][a-z0-9]+)*
// image repository names can contain [._] but [._] are not allowed in RFC-1123 labels.
// replace '/', '_' and '.' with '-'
re := strings.NewReplacer("/", "-",
"_", "-",
".", "-")
return re.Replace(image[start:end])
}
// Result returns the built Container.
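As a quick illustration of the replacement logic above, here is a standalone sketch; `sanitizeImageName` is a hypothetical helper and omits the registry/tag trimming that `getName` also performs:

```go
package main

import (
	"fmt"
	"strings"
)

// sanitizeImageName maps the repository-name characters that are legal
// per the distribution spec ([a-z0-9]+(?:[._-][a-z0-9]+)*) but illegal
// in RFC-1123 labels onto '-'.
func sanitizeImageName(repo string) string {
	re := strings.NewReplacer("/", "-", "_", "-", ".", "-")
	return re.Replace(repo)
}

func main() {
	// Mirrors the new test cases: '_' and '.' now normalize the same way '/' does.
	fmt.Println(sanitizeImageName("tanzu_migrator/route-2-httpproxy")) // tanzu-migrator-route-2-httpproxy
	fmt.Println(sanitizeImageName("tanzu.migrator/route-2-httpproxy")) // tanzu-migrator-route-2-httpproxy
}
```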


@@ -75,7 +75,17 @@ func TestGetName(t *testing.T) {
{
name: "image name with registry hostname starting with a / will include the registry name ¯\\_(ツ)_/¯",
image: "/gcr.io/my-repo/mystery/another/my-image",
expected: "gcr.io-my-repo-mystery-another-my-image",
expected: "gcr-io-my-repo-mystery-another-my-image",
},
{
name: "image repository names containing _ ",
image: "projects.registry.vmware.com/tanzu_migrator/route-2-httpproxy:myTag",
expected: "tanzu-migrator-route-2-httpproxy",
},
{
name: "image repository names containing . ",
image: "projects.registry.vmware.com/tanzu.migrator/route-2-httpproxy:myTag",
expected: "tanzu-migrator-route-2-httpproxy",
},
}


@@ -19,6 +19,8 @@ package client
import (
"os"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
k8scheme "k8s.io/client-go/kubernetes/scheme"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/pkg/errors"
@@ -45,7 +47,8 @@ type Factory interface {
// DynamicClient returns a Kubernetes dynamic client. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
DynamicClient() (dynamic.Interface, error)
// KubebuilderClient returns a Kubernetes dynamic client. It uses the following priority to specify the cluster
// KubebuilderClient returns a client for the controller runtime framework. It adds Kubernetes and Velero
// types to its scheme. It uses the following priority to specify the cluster
// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
KubebuilderClient() (kbclient.Client, error)
// SetBasename changes the basename for an already-constructed client.
@@ -151,6 +154,8 @@ func (f *factory) KubebuilderClient() (kbclient.Client, error) {
scheme := runtime.NewScheme()
velerov1api.AddToScheme(scheme)
k8scheme.AddToScheme(scheme)
apiextv1beta1.AddToScheme(scheme)
kubebuilderClient, err := kbclient.New(clientConfig, kbclient.Options{
Scheme: scheme,
})
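
A sketch of the composed-scheme construction this change enables. `newVeleroClient` is a hypothetical helper, and the `AddToScheme` error handling is added here for illustration (the factory above ignores those errors):

```go
package client

import (
	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apimachinery/pkg/runtime"
	k8scheme "k8s.io/client-go/kubernetes/scheme"
	"k8s.io/client-go/rest"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"

	velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
)

// newVeleroClient builds a controller-runtime client whose scheme knows
// Velero types, the built-in Kubernetes types, and v1beta1 CRDs; without
// these registrations, Get/List on those kinds fails with
// "no kind is registered" errors.
func newVeleroClient(cfg *rest.Config) (kbclient.Client, error) {
	scheme := runtime.NewScheme()
	for _, add := range []func(*runtime.Scheme) error{
		velerov1api.AddToScheme,
		k8scheme.AddToScheme,
		apiextv1beta1.AddToScheme,
	} {
		if err := add(scheme); err != nil {
			return nil, err
		}
	}
	return kbclient.New(cfg, kbclient.Options{Scheme: scheme})
}
```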


@@ -255,10 +255,7 @@ func (o *InstallOptions) Run(c *cobra.Command, f client.Factory) error {
return err
}
resources, err = install.AllResources(vo)
if err != nil {
return err
}
resources = install.AllResources(vo)
}
if _, err := output.PrintWithFormat(c, resources); err != nil {


@@ -26,20 +26,18 @@ import (
"github.com/spf13/pflag"
corev1 "k8s.io/api/core/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
kubeerrs "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/pkg/client"
"github.com/vmware-tanzu/velero/pkg/cmd"
"github.com/vmware-tanzu/velero/pkg/cmd/cli"
"github.com/vmware-tanzu/velero/pkg/install"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
// uninstallOptions collects all the options for uninstalling Velero from a Kubernetes cluster.
@@ -78,9 +76,9 @@ Use '--force' to skip the prompt confirming if you want to uninstall Velero.
}
}
client, extCl, err := kube.GetClusterClient()
kbClient, err := f.KubebuilderClient()
cmd.CheckError(err)
cmd.CheckError(Run(context.Background(), client, extCl, f.Namespace(), o.wait))
cmd.CheckError(Run(context.Background(), kbClient, f.Namespace(), o.wait))
},
}
@@ -89,53 +87,68 @@ Use '--force' to skip the prompt confirming if you want to uninstall Velero.
}
// Run removes all components that were deployed using the Velero install command
func Run(ctx context.Context, client *kubernetes.Clientset, extensionsClient *apiextensionsclientset.Clientset, namespace string, waitToTerminate bool) error {
func Run(ctx context.Context, kbClient kbclient.Client, namespace string, waitToTerminate bool) error {
var errs []error
// namespace
ns, err := client.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
if err != nil {
ns := &corev1.Namespace{}
key := kbclient.ObjectKey{Name: namespace}
if err := kbClient.Get(ctx, key, ns); err != nil {
if apierrors.IsNotFound(err) {
fmt.Printf("Velero installation namespace %q does not exist, skipping.\n", namespace)
fmt.Printf("Velero namespace %q does not exist, skipping.\n", namespace)
} else {
errs = append(errs, errors.WithStack(err))
}
} else {
if ns.Status.Phase == corev1.NamespaceTerminating {
fmt.Printf("Velero installation namespace %q is terminating.\n", namespace)
fmt.Printf("Velero namespace %q is terminating.\n", namespace)
} else {
err = client.CoreV1().Namespaces().Delete(ctx, ns.Name, metav1.DeleteOptions{})
if err != nil {
if err := kbClient.Delete(ctx, ns); err != nil {
errs = append(errs, errors.WithStack(err))
}
}
}
// rolebinding
// ClusterRoleBinding
crb := install.ClusterRoleBinding(namespace)
if err := client.RbacV1().ClusterRoleBindings().Delete(ctx, crb.Name, metav1.DeleteOptions{}); err != nil {
key = kbclient.ObjectKey{Name: crb.Name}
if err := kbClient.Get(ctx, key, crb); err != nil {
if apierrors.IsNotFound(err) {
fmt.Printf("Velero installation clusterrolebinding %q does not exist, skipping.\n", crb.Name)
fmt.Printf("Velero ClusterRoleBinding %q does not exist, skipping.\n", crb.Name)
} else {
errs = append(errs, errors.WithStack(err))
}
} else {
if err := kbClient.Delete(ctx, crb); err != nil {
errs = append(errs, errors.WithStack(err))
}
}
// CRDs
veleroLabels := labels.FormatLabels(install.Labels())
crds, err := extensionsClient.ApiextensionsV1().CustomResourceDefinitions().List(ctx, metav1.ListOptions{
LabelSelector: veleroLabels,
})
if err != nil {
errs = append(errs, errors.WithStack(err))
crdList := apiextv1beta1.CustomResourceDefinitionList{}
opts := kbclient.ListOptions{
Namespace: namespace,
Raw: &metav1.ListOptions{
LabelSelector: veleroLabels,
},
}
if len(crds.Items) == 0 {
fmt.Print("Velero CRDs do not exist, skipping.\n")
if err := kbClient.List(context.Background(), &crdList, &opts); err != nil {
errs = append(errs, errors.WithStack(err))
} else {
for _, removeCRD := range crds.Items {
if err = extensionsClient.ApiextensionsV1().CustomResourceDefinitions().Delete(ctx, removeCRD.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
err2 := errors.WithMessagef(err, "Uninstall failed removing CRD %s", removeCRD.ObjectMeta.Name)
errs = append(errs, errors.WithStack(err2))
if len(crdList.Items) == 0 {
fmt.Print("Velero CRDs do not exist, skipping.\n")
} else {
veleroLabelSelector := labels.SelectorFromSet(install.Labels())
opts := []kbclient.DeleteAllOfOption{
kbclient.InNamespace(namespace),
kbclient.MatchingLabelsSelector{
Selector: veleroLabelSelector,
},
}
crd := &apiextv1beta1.CustomResourceDefinition{}
if err := kbClient.DeleteAllOf(ctx, crd, opts...); err != nil {
errs = append(errs, errors.WithStack(err))
}
}
}
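
The list-then-delete loop against the apiextensions clientset collapses into a single label-selected `DeleteAllOf`. A standalone sketch (`deleteVeleroCRDs` is hypothetical; the diff also passes `InNamespace`, which is harmless for cluster-scoped CRDs and omitted here):

```go
package uninstall

import (
	"context"

	apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
	"k8s.io/apimachinery/pkg/labels"
	kbclient "sigs.k8s.io/controller-runtime/pkg/client"

	"github.com/vmware-tanzu/velero/pkg/install"
)

// deleteVeleroCRDs removes every CRD carrying Velero's component labels
// in one server-side call, replacing the previous list-then-delete loop.
func deleteVeleroCRDs(ctx context.Context, c kbclient.Client) error {
	selector := labels.SelectorFromSet(install.Labels())
	return c.DeleteAllOf(ctx, &apiextv1beta1.CustomResourceDefinition{},
		kbclient.MatchingLabelsSelector{Selector: selector})
}
```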
@@ -147,7 +160,7 @@ func Run(ctx context.Context, client *kubernetes.Clientset, extensionsClient *ap
defer cancel()
checkFunc := func() {
_, err := client.CoreV1().Namespaces().Get(ctx, namespace, metav1.GetOptions{})
err := kbClient.Get(ctx, key, ns)
if err != nil {
if apierrors.IsNotFound(err) {
fmt.Print("\n")


@@ -245,7 +245,7 @@ func AllCRDs() *unstructured.UnstructuredList {
// AllResources returns a list of all resources necessary to install Velero, in the appropriate order, into a Kubernetes cluster.
// Items are unstructured, since there are different data types returned.
func AllResources(o *VeleroOptions) (*unstructured.UnstructuredList, error) {
func AllResources(o *VeleroOptions) *unstructured.UnstructuredList {
resources := AllCRDs()
ns := Namespace(o.Namespace)
@@ -317,5 +317,5 @@ func AllResources(o *VeleroOptions) (*unstructured.UnstructuredList, error) {
appendUnstructured(resources, ds)
}
return resources, nil
return resources
}


@@ -100,9 +100,20 @@ func isPVBMatchPod(pvb *velerov1api.PodVolumeBackup, podName string, namespace s
return podName == pvb.Spec.Pod.Name && namespace == pvb.Spec.Pod.Namespace
}
// volumeIsProjected checks if the given volume exists in the list of podVolumes
// and returns true if the volume has a projected source
func volumeIsProjected(volumeName string, podVolumes []corev1api.Volume) bool {
for _, volume := range podVolumes {
if volume.Name == volumeName && volume.Projected != nil {
return true
}
}
return false
}
// GetVolumeBackupsForPod returns a map of volume name -> snapshot ID
// for the PodVolumeBackups that exist for the provided pod.
func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod metav1.Object, sourcePodNs string) map[string]string {
func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod *corev1api.Pod, sourcePodNs string) map[string]string {
volumes := make(map[string]string)
for _, pvb := range podVolumeBackups {
@@ -116,6 +127,13 @@ func GetVolumeBackupsForPod(podVolumeBackups []*velerov1api.PodVolumeBackup, pod
continue
}
// If the volume came from a projected source, skip its restore.
// This allows backups affected by https://github.com/vmware-tanzu/velero/issues/3863
// to be restored successfully.
if volumeIsProjected(pvb.Spec.Volume, pod.Spec.Volumes) {
continue
}
volumes[pvb.Spec.Volume] = pvb.Status.SnapshotID
}
@@ -183,6 +201,10 @@ func GetPodVolumesUsingRestic(pod *corev1api.Pod, defaultVolumesToRestic bool) [
if pv.ConfigMap != nil {
continue
}
// don't back up volumes mounted as projected volumes; all data in them comes from kube state.
if pv.Projected != nil {
continue
}
// don't back up volumes that are included in the exclude list.
if contains(volsToExclude, pv.Name) {
continue


@@ -37,6 +37,7 @@ func TestGetVolumeBackupsForPod(t *testing.T) {
tests := []struct {
name string
podVolumeBackups []*velerov1api.PodVolumeBackup
podVolumes []corev1api.Volume
podAnnotations map[string]string
podName string
sourcePodNs string
@@ -127,6 +128,30 @@ func TestGetVolumeBackupsForPod(t *testing.T) {
sourcePodNs: "TestNS",
expected: map[string]string{"pvbtest1-foo": "snapshot1"},
},
{
name: "volumes from PVBs that correspond to a pod volume from a projected source are not returned",
podVolumeBackups: []*velerov1api.PodVolumeBackup{
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot1").Volume("pvb-non-projected").Result(),
builder.ForPodVolumeBackup("velero", "pvb-1").PodName("TestPod").PodNamespace("TestNS").SnapshotID("snapshot2").Volume("pvb-projected").Result(),
},
podVolumes: []corev1api.Volume{
{
Name: "pvb-non-projected",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "pvb-projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
},
podName: "TestPod",
sourcePodNs: "TestNS",
expected: map[string]string{"pvb-non-projected": "snapshot1"},
},
}
for _, test := range tests {
@@ -134,6 +159,7 @@ func TestGetVolumeBackupsForPod(t *testing.T) {
pod := &corev1api.Pod{}
pod.Annotations = test.podAnnotations
pod.Name = test.podName
pod.Spec.Volumes = test.podVolumes
res := GetVolumeBackupsForPod(test.podVolumeBackups, pod, test.sourcePodNs)
assert.Equal(t, test.expected, res)
@@ -507,6 +533,41 @@ func TestGetPodVolumesUsingRestic(t *testing.T) {
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
{
name: "should exclude projected volumes",
defaultVolumesToRestic: true,
pod: &corev1api.Pod{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
VolumesToExcludeAnnotation: "nonResticPV1,nonResticPV2,nonResticPV3",
},
},
Spec: corev1api.PodSpec{
Volumes: []corev1api.Volume{
{Name: "resticPV1"}, {Name: "resticPV2"}, {Name: "resticPV3"},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{
Sources: []corev1api.VolumeProjection{{
Secret: &corev1api.SecretProjection{
LocalObjectReference: corev1api.LocalObjectReference{},
Items: nil,
Optional: nil,
},
DownwardAPI: nil,
ConfigMap: nil,
ServiceAccountToken: nil,
}},
DefaultMode: nil,
},
},
},
},
},
},
expected: []string{"resticPV1", "resticPV2", "resticPV3"},
},
}
for _, tc := range testCases {
@@ -594,3 +655,78 @@ func TestIsPVBMatchPod(t *testing.T) {
}
}
func TestVolumeIsProjected(t *testing.T) {
testCases := []struct {
name string
volumeName string
podVolumes []corev1api.Volume
expected bool
}{
{
name: "volume name not in list of volumes",
volumeName: "missing-volume",
podVolumes: []corev1api.Volume{
{
Name: "non-projected",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
},
expected: false,
},
{
name: "volume name in list of volumes but not projected",
volumeName: "non-projected",
podVolumes: []corev1api.Volume{
{
Name: "non-projected",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
},
expected: false,
},
{
name: "volume name in list of volumes and projected",
volumeName: "projected",
podVolumes: []corev1api.Volume{
{
Name: "non-projected",
VolumeSource: corev1api.VolumeSource{
PersistentVolumeClaim: &corev1api.PersistentVolumeClaimVolumeSource{},
},
},
{
Name: "projected",
VolumeSource: corev1api.VolumeSource{
Projected: &corev1api.ProjectedVolumeSource{},
},
},
},
expected: true,
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
actual := volumeIsProjected(tc.volumeName, tc.podVolumes)
assert.Equal(t, tc.expected, actual)
})
}
}


@@ -377,6 +377,10 @@ func getOrderedResources(resourcePriorities []string, backupResources map[string
return append(resourcePriorities, orderedBackupResources...)
}
type progressUpdate struct {
totalItems, itemsRestored int
}
func (ctx *restoreContext) execute() (Result, Result) {
warnings, errs := Result{}, Result{}
@@ -409,14 +413,6 @@ func (ctx *restoreContext) execute() (Result, Result) {
}
}
selectedResourceCollection, w, e := ctx.getOrderedResourceCollection(backupResources)
warnings.Merge(&w)
errs.Merge(&e)
type progressUpdate struct {
totalItems, itemsRestored int
}
update := make(chan progressUpdate)
quit := make(chan struct{})
@@ -456,94 +452,69 @@ func (ctx *restoreContext) execute() (Result, Result) {
}()
// totalItems: previously discovered items, i: iteration counter.
totalItems, i, existingNamespaces := 0, 0, sets.NewString()
totalItems, processedItems, existingNamespaces := 0, 0, sets.NewString()
// First restore CRDs. This is needed so that they are available in the cluster
// when getOrderedResourceCollection is called again on the whole backup and
// needs to validate all resources listed.
crdResourceCollection, processedResources, w, e := ctx.getOrderedResourceCollection(
backupResources,
make([]restoreableResource, 0),
sets.NewString(),
[]string{"customresourcedefinitions"},
false,
)
warnings.Merge(&w)
errs.Merge(&e)
for _, selectedResource := range crdResourceCollection {
totalItems += selectedResource.totalItems
}
for _, selectedResource := range crdResourceCollection {
var w, e Result
// Restore this resource
processedItems, w, e = ctx.processSelectedResource(
selectedResource,
totalItems,
processedItems,
existingNamespaces,
update,
)
warnings.Merge(&w)
errs.Merge(&e)
}
// Restore everything else
selectedResourceCollection, _, w, e := ctx.getOrderedResourceCollection(
backupResources,
crdResourceCollection,
processedResources,
ctx.resourcePriorities,
true,
)
warnings.Merge(&w)
errs.Merge(&e)
// reset processedItems and totalItems before processing full resource list
processedItems = 0
totalItems = 0
for _, selectedResource := range selectedResourceCollection {
totalItems += selectedResource.totalItems
}
for _, selectedResource := range selectedResourceCollection {
groupResource := schema.ParseGroupResource(selectedResource.resource)
for namespace, selectedItems := range selectedResource.selectedItemsByNamespace {
for _, selectedItem := range selectedItems {
// If we don't know whether this namespace exists yet, attempt to create
// it in order to ensure it exists. Try to get it from the backup tarball
// (in order to get any backed-up metadata), but if we don't find it there,
// create a blank one.
if namespace != "" && !existingNamespaces.Has(selectedItem.targetNamespace) {
logger := ctx.log.WithField("namespace", namespace)
ns := getNamespace(
logger,
archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", namespace),
selectedItem.targetNamespace,
)
_, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(
ns,
ctx.namespaceClient,
ctx.resourceTerminatingTimeout,
)
if err != nil {
errs.AddVeleroError(err)
continue
}
// Add the newly created namespace to the list of restored items.
if nsCreated {
itemKey := velero.ResourceIdentifier{
GroupResource: kuberesource.Namespaces,
Namespace: ns.Namespace,
Name: ns.Name,
}
ctx.restoredItems[itemKey] = struct{}{}
}
// Keep track of namespaces that we know exist so we don't
// have to try to create them multiple times.
existingNamespaces.Insert(selectedItem.targetNamespace)
}
obj, err := archive.Unmarshal(ctx.fileSystem, selectedItem.path)
if err != nil {
errs.Add(
selectedItem.targetNamespace,
fmt.Errorf(
"error decoding %q: %v",
strings.Replace(selectedItem.path, ctx.restoreDir+"/", "", -1),
err,
),
)
continue
}
w, e := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace)
warnings.Merge(&w)
errs.Merge(&e)
i++
// totalItems keeps the count of items previously known. There
// may be additional items restored by plugins. We want to include
// the additional items by looking at restoredItems at the same
// time, we don't want previously known items counted twice as
// they are present in both restoredItems and totalItems.
actualTotalItems := len(ctx.restoredItems) + (totalItems - i)
update <- progressUpdate{
totalItems: actualTotalItems,
itemsRestored: len(ctx.restoredItems),
}
}
}
// If we just restored custom resource definitions (CRDs), refresh
// discovery because the restored CRDs may have created new APIs that
// didn't previously exist in the cluster, and we want to be able to
// resolve & restore instances of them in subsequent loop iterations.
if groupResource == kuberesource.CustomResourceDefinitions {
if err := ctx.discoveryHelper.Refresh(); err != nil {
warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs"))
}
}
var w, e Result
// Restore this resource
processedItems, w, e = ctx.processSelectedResource(
selectedResource,
totalItems,
processedItems,
existingNamespaces,
update,
)
warnings.Merge(&w)
errs.Merge(&e)
}
// Close the progress update channel.
@@ -605,6 +576,107 @@ func (ctx *restoreContext) execute() (Result, Result) {
return warnings, errs
}
// Process and restore one restoreableResource from the backup and update restore progress
// metadata. At this point, the resource has already been validated and counted for inclusion
// in the expected total restore count.
func (ctx *restoreContext) processSelectedResource(
selectedResource restoreableResource,
totalItems int,
processedItems int,
existingNamespaces sets.String,
update chan progressUpdate,
) (int, Result, Result) {
warnings, errs := Result{}, Result{}
groupResource := schema.ParseGroupResource(selectedResource.resource)
for namespace, selectedItems := range selectedResource.selectedItemsByNamespace {
for _, selectedItem := range selectedItems {
// If we don't know whether this namespace exists yet, attempt to create
// it in order to ensure it exists. Try to get it from the backup tarball
// (in order to get any backed-up metadata), but if we don't find it there,
// create a blank one.
if namespace != "" && !existingNamespaces.Has(selectedItem.targetNamespace) {
logger := ctx.log.WithField("namespace", namespace)
ns := getNamespace(
logger,
archive.GetItemFilePath(ctx.restoreDir, "namespaces", "", namespace),
selectedItem.targetNamespace,
)
_, nsCreated, err := kube.EnsureNamespaceExistsAndIsReady(
ns,
ctx.namespaceClient,
ctx.resourceTerminatingTimeout,
)
if err != nil {
errs.AddVeleroError(err)
continue
}
// Add the newly created namespace to the list of restored items.
if nsCreated {
itemKey := velero.ResourceIdentifier{
GroupResource: kuberesource.Namespaces,
Namespace: ns.Namespace,
Name: ns.Name,
}
ctx.restoredItems[itemKey] = struct{}{}
}
// Keep track of namespaces that we know exist so we don't
// have to try to create them multiple times.
existingNamespaces.Insert(selectedItem.targetNamespace)
}
obj, err := archive.Unmarshal(ctx.fileSystem, selectedItem.path)
if err != nil {
errs.Add(
selectedItem.targetNamespace,
fmt.Errorf(
"error decoding %q: %v",
strings.Replace(selectedItem.path, ctx.restoreDir+"/", "", -1),
err,
),
)
continue
}
w, e := ctx.restoreItem(obj, groupResource, selectedItem.targetNamespace)
warnings.Merge(&w)
errs.Merge(&e)
processedItems++
// totalItems keeps the count of items previously known. There
// may be additional items restored by plugins. We want to include
// the additional items by looking at restoredItems; at the same
// time, we don't want previously known items counted twice, as
// they are present in both restoredItems and totalItems.
actualTotalItems := len(ctx.restoredItems) + (totalItems - processedItems)
update <- progressUpdate{
totalItems: actualTotalItems,
itemsRestored: len(ctx.restoredItems),
}
ctx.log.WithFields(map[string]interface{}{
"progress": "",
"resource": groupResource.String(),
"namespace": selectedItem.targetNamespace,
"name": selectedItem.name,
}).Infof("Restored %d items out of an estimated total of %d (estimate will change throughout the restore)", len(ctx.restoredItems), actualTotalItems)
}
}
// If we just restored custom resource definitions (CRDs), refresh
// discovery because the restored CRDs may have created new APIs that
// didn't previously exist in the cluster, and we want to be able to
// resolve & restore instances of them in subsequent loop iterations.
if groupResource == kuberesource.CustomResourceDefinitions {
if err := ctx.discoveryHelper.Refresh(); err != nil {
warnings.Add("", errors.Wrap(err, "refresh discovery after restoring CRDs"))
}
}
return processedItems, warnings, errs
}
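
A worked example of the estimate arithmetic above, with hypothetical numbers:

```go
package main

import "fmt"

func main() {
	// Hypothetical counts: 10 items were known up front, 4 have been
	// processed, and plugin-restored extras have grown restoredItems to 6.
	totalItems, processedItems, restoredItems := 10, 4, 6

	// Remaining known work (10-4=6) plus everything already restored.
	actualTotalItems := restoredItems + (totalItems - processedItems)
	fmt.Println(actualTotalItems) // 12 — the estimate grows as plugins add items
}
```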
// getNamespace returns a namespace API object that we should attempt to
// create before restoring anything into it. It will come from the backup
// tarball if it exists, else will be a new one. If from the tarball, it
@@ -1232,8 +1304,16 @@ func (ctx *restoreContext) restoreItem(obj *unstructured.Unstructured, groupReso
return warnings, errs
}
if groupResource == kuberesource.Pods && len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, obj, originalNamespace)) > 0 {
restorePodVolumeBackups(ctx, createdObj, originalNamespace)
if groupResource == kuberesource.Pods {
pod := new(v1.Pod)
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), pod); err != nil {
errs.Add(namespace, err)
return warnings, errs
}
if len(restic.GetVolumeBackupsForPod(ctx.podVolumeBackups, pod, originalNamespace)) > 0 {
restorePodVolumeBackups(ctx, createdObj, originalNamespace)
}
}
if groupResource == kuberesource.Pods {
@@ -1559,10 +1639,14 @@ type restoreableItem struct {
// identifiers, applies resource include/exclude criteria, and Kubernetes
// selectors to make a list of resources to be actually restored preserving the
// original order.
func (ctx *restoreContext) getOrderedResourceCollection(backupResources map[string]*archive.ResourceItems) ([]restoreableResource, Result, Result) {
func (ctx *restoreContext) getOrderedResourceCollection(
backupResources map[string]*archive.ResourceItems,
restoreResourceCollection []restoreableResource,
processedResources sets.String,
resourcePriorities []string,
includeAllResources bool,
) ([]restoreableResource, sets.String, Result, Result) {
var warnings, errs Result
processedResources := sets.NewString()
restoreResourceCollection := make([]restoreableResource, 0)
// Iterate through an ordered list of resources to restore, checking each
// one to see if it should be restored. Note that resources *may* be in this
// list twice, i.e. once due to being a prioritized resource, and once due
@@ -1577,7 +1661,13 @@ func (ctx *restoreContext) getOrderedResourceCollection(backupResources map[stri
// Since we keep track of the fully-resolved group-resources that we *have*
// restored, we won't try to restore a resource twice even if it's in the
// ordered list twice.
for _, resource := range getOrderedResources(ctx.resourcePriorities, backupResources) {
var resourceList []string
if includeAllResources {
resourceList = getOrderedResources(resourcePriorities, backupResources)
} else {
resourceList = resourcePriorities
}
for _, resource := range resourceList {
// try to resolve the resource via discovery to a complete group/version/resource
gvr, _, err := ctx.discoveryHelper.ResourceFor(schema.ParseGroupResource(resource).WithVersion(""))
if err != nil {
@@ -1650,7 +1740,7 @@ func (ctx *restoreContext) getOrderedResourceCollection(backupResources map[stri
// record that we've restored the resource
processedResources.Insert(groupResource.String())
}
return restoreResourceCollection, warnings, errs
return restoreResourceCollection, processedResources, warnings, errs
}
// getSelectedRestoreableItems applies Kubernetes selectors on individual items
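The unstructured-to-typed conversion added in `restoreItem` above, isolated into a minimal runnable sketch (the inline pod map is illustrative only):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

func main() {
	content := map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata":   map[string]interface{}{"name": "demo"},
	}
	pod := new(v1.Pod)
	// Convert the unstructured content into a typed Pod so callers can
	// inspect pod.Spec.Volumes (needed for the projected-volume check).
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(content, pod); err != nil {
		panic(err)
	}
	fmt.Println(pod.Name, len(pod.Spec.Volumes)) // demo 0
}
```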


@@ -24,15 +24,12 @@ import (
"github.com/pkg/errors"
corev1api "k8s.io/api/core/v1"
apiextv1beta1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
corev1client "k8s.io/client-go/kubernetes/typed/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/clientcmd"
)
// NamespaceAndName returns a string in the format <namespace>/<name>
@@ -220,26 +217,3 @@ func IsUnstructuredCRDReady(crd *unstructured.Unstructured) (bool, error) {
return (isEstablished && namesAccepted), nil
}
// GetClusterClient instantiates and returns a client for the cluster.
func GetClusterClient() (*kubernetes.Clientset, *apiextensionsclientset.Clientset, error) {
loadingRules := clientcmd.NewDefaultClientConfigLoadingRules()
configOverrides := &clientcmd.ConfigOverrides{}
kubeConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, configOverrides)
clientConfig, err := kubeConfig.ClientConfig()
if err != nil {
return nil, nil, errors.WithStack(err)
}
client, err := kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, nil, errors.WithStack(err)
}
extensionClientSet, err := apiextensionsclientset.NewForConfig(clientConfig)
if err != nil {
return nil, nil, errors.WithStack(err)
}
return client, extensionClientSet, nil
}


@@ -459,7 +459,7 @@ cqlsh:demodb> select * from emp;
cqlsh:demodb>
```
It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubenetes objects for the Cassandra application, as well as restored the database and table contents.
It looks like the restore has been successful. Velero v1.1 has successfully restored the Kubernetes objects for the Cassandra application, as well as restored the database and table contents.
## Feedback and Participation


@@ -48,7 +48,7 @@ OUTPUT_DIR := _output/$(GOOS)/$(GOARCH)/bin
GINKGO_FOCUS ?=
VELERO_CLI ?=$$(pwd)/../../_output/bin/$(GOOS)/$(GOARCH)/velero
VELERO_IMAGE ?= velero/velero:main
VELERO_NAMESPACE ?=
VELERO_NAMESPACE ?= velero
CREDS_FILE ?=
BSL_BUCKET ?=
BSL_PREFIX ?=


@@ -88,9 +88,9 @@ For example, E2E tests can be run from Velero repository roots using the command
BSL_CONFIG="resourceGroup=$AZURE_BACKUP_RESOURCE_GROUP,storageAccount=$AZURE_STORAGE_ACCOUNT_ID,subscriptionId=$AZURE_BACKUP_SUBSCRIPTION_ID" BSL_BUCKET=<BUCKET_FOR_E2E_TEST_BACKUP> CREDS_FILE=/path/to/azure-creds CLOUD_PROVIDER=azure make test-e2e
```
Please refer to the `velero-plugin-for-microsoft-azure` documentation for instructions on how to [set up permissions for Velero](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#set-permissions-for-velero) and [set up azure storage account and blob container](https://github.com/vmware-tanzu/velero-plugin-for-microsoft-azure#setup-azure-storage-account-and-blob-container)
1. Run Ginko-focused Restore Multi-API Groups tests using an image built for PR #3133 and Minio as the backup storage location:
1. Run Ginkgo-focused Restore Multi-API Groups tests using Minio as the backup storage location:
```bash
BSL_CONFIG="region=minio,s3ForcePathStyle=\"true\",s3Url=http://192.168.1.124:9000" BSL_PREFIX=veldat BSL_BUCKET=velero CREDS_FILE=~/go/src/github.com/vmware-tanzu/velero/frankie-secrets/credentials-minio PLUGIN_PROVIDER=aws VELERO_IMAGE=projects.registry.vmware.com/tanzu_migrator/velero-pr3133:0.0.5 GINKGO_FOCUS="API group versions" make test-e2e
BSL_CONFIG="region=minio,s3ForcePathStyle=\"true\",s3Url=<ip address>:9000" BSL_PREFIX=<prefix> BSL_BUCKET=<bucket> CREDS_FILE=<absolute path to minio credentials file> CLOUD_PROVIDER=kind OBJECT_STORE_PROVIDER=aws VELERO_NAMESPACE="velero" GINKGO_FOCUS="API group versions" make test-e2e
```
1. Run Velero tests in a kind cluster with AWS (or Minio) as the storage provider and use Microsoft Azure as the storage provider for an additional Backup Storage Location:
```bash
@@ -105,3 +105,11 @@ For example, E2E tests can be run from Velero repository roots using the command
Velero E2E tests use the [Ginkgo](https://onsi.github.io/ginkgo/) testing framework, which allows a subset of the tests to be run using the [`-focus` and `-skip`](https://onsi.github.io/ginkgo/#focused-specs) flags to ginkgo.
The `-focus` flag is passed to ginkgo using the `GINKGO_FOCUS` make variable. This can be used to focus on specific tests.
## Adding tests
### API clients
When adding a test, aim to instantiate an API client only once at the beginning of the test. There is a constructor `newTestClient` that facilitates the configuration and instantiation of clients. Also, please use the `kubebuilder` runtime controller client for any new test, as we will phase out usage of `client-go` API clients.
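For example, a sketch of typical use (`ctx` and the `corev1api`/`kbclient` imports are assumed from the surrounding test file):

```go
// Instantiate once at the top of the spec, then prefer the
// controller-runtime client for new API calls.
client, err := newTestClient()
Expect(err).To(Succeed(), "Failed to instantiate cluster client")

ns := new(corev1api.Namespace)
Expect(client.kubebuilder.Get(ctx, kbclient.ObjectKey{Name: "velero"}, ns)).To(Succeed())
```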
### Tips
Look for the ⛵ emoji printed at the end of each install and uninstall log. There should not be two installs/uninstalls in a row, and there should be tests between an install and an uninstall.


@@ -1,18 +1,28 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"context"
"flag"
"fmt"
"time"
"github.com/google/uuid"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
apiextensionsclientset "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/util/kube"
)
var (
@@ -34,12 +44,12 @@ func backup_restore_with_restic() {
func backup_restore_test(useVolumeSnapshots bool) {
var (
client *kubernetes.Clientset
extensionsClient *apiextensionsclientset.Clientset
backupName string
restoreName string
backupName, restoreName string
)
client, err := newTestClient()
Expect(err).To(Succeed(), "Failed to instantiate cluster client for backup tests")
BeforeEach(func() {
if useVolumeSnapshots && cloudProvider == "kind" {
Skip("Volume snapshots not supported on kind")
@@ -49,21 +59,16 @@ func backup_restore_test(useVolumeSnapshots bool) {
uuidgen, err = uuid.NewRandom()
Expect(err).To(Succeed())
if installVelero {
Expect(VeleroInstall(context.Background(), veleroImage, veleroNamespace, cloudProvider, objectStoreProvider, useVolumeSnapshots,
Expect(veleroInstall(context.Background(), veleroImage, veleroNamespace, cloudProvider, objectStoreProvider, useVolumeSnapshots,
cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, vslConfig, "")).To(Succeed())
}
client, extensionsClient, err = kube.GetClusterClient()
Expect(err).To(Succeed(), "Failed to instantiate cluster client")
})
AfterEach(func() {
if installVelero {
timeoutCTX, _ := context.WithTimeout(context.Background(), time.Minute)
err := VeleroUninstall(timeoutCTX, client, extensionsClient, veleroNamespace)
err = veleroUninstall(context.Background(), client.kubebuilder, installVelero, veleroNamespace)
Expect(err).To(Succeed())
}
})
When("kibishii is the sample workload", func() {
@@ -72,7 +77,7 @@ func backup_restore_test(useVolumeSnapshots bool) {
restoreName = "restore-" + uuidgen.String()
// Even though we are using Velero's CloudProvider plugin for object storage, the kubernetes cluster is running on
// KinD. So use the kind installation for Kibishii.
Expect(RunKibishiiTests(client, cloudProvider, veleroCLI, veleroNamespace, backupName, restoreName, "", useVolumeSnapshots)).To(Succeed(),
Expect(runKibishiiTests(client, cloudProvider, veleroCLI, veleroNamespace, backupName, restoreName, "", useVolumeSnapshots)).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace")
})
@@ -89,7 +94,7 @@ func backup_restore_test(useVolumeSnapshots bool) {
Skip("no additional BSL credentials given, not running multiple BackupStorageLocation with unique credentials tests")
}
Expect(VeleroAddPluginsForProvider(context.TODO(), veleroCLI, veleroNamespace, additionalBSLProvider)).To(Succeed())
Expect(veleroAddPluginsForProvider(context.TODO(), veleroCLI, veleroNamespace, additionalBSLProvider)).To(Succeed())
// Create Secret for additional BSL
secretName := fmt.Sprintf("bsl-credentials-%s", uuidgen)
@@ -98,11 +103,11 @@ func backup_restore_test(useVolumeSnapshots bool) {
secretKey: additionalBSLCredentials,
}
Expect(CreateSecretFromFiles(context.TODO(), client, veleroNamespace, secretName, files)).To(Succeed())
Expect(createSecretFromFiles(context.TODO(), client, veleroNamespace, secretName, files)).To(Succeed())
// Create additional BSL using credential
additionalBsl := fmt.Sprintf("bsl-%s", uuidgen)
Expect(VeleroCreateBackupLocation(context.TODO(),
Expect(veleroCreateBackupLocation(context.TODO(),
veleroCLI,
veleroNamespace,
additionalBsl,
@@ -120,7 +125,7 @@ func backup_restore_test(useVolumeSnapshots bool) {
backupName = fmt.Sprintf("backup-%s-%s", bsl, uuidgen)
restoreName = fmt.Sprintf("restore-%s-%s", bsl, uuidgen)
Expect(RunKibishiiTests(client, cloudProvider, veleroCLI, veleroNamespace, backupName, restoreName, bsl, useVolumeSnapshots)).To(Succeed(),
Expect(runKibishiiTests(client, cloudProvider, veleroCLI, veleroNamespace, backupName, restoreName, bsl, useVolumeSnapshots)).To(Succeed(),
"Failed to successfully backup and restore Kibishii namespace using BSL %s", bsl)
}
})

test/e2e/client.go (new file)

@@ -0,0 +1,79 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
"k8s.io/client-go/kubernetes"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
"github.com/vmware-tanzu/velero/pkg/client"
)
// testClient contains different API clients that are in use throughout
// the e2e tests.
type testClient struct {
kubebuilder kbclient.Client
// clientGo returns a client-go API client.
//
// Deprecated, TODO(2.0): presuming all controllers and resources are converted to the
// controller runtime framework by v2.0, it is the intent to remove all
// client-go API clients. Please use the controller runtime to make API calls for tests.
clientGo kubernetes.Interface
// dynamicFactory returns a client-go API client for retrieving dynamic clients
// for GroupVersionResources and GroupVersionKinds.
//
// Deprecated, TODO(2.0): presuming all controllers and resources are converted to the
// controller runtime framework by v2.0, it is the intent to remove all
// client-go API clients. Please use the controller runtime to make API calls for tests.
dynamicFactory client.DynamicFactory
}
// newTestClient returns a set of ready-to-use API clients.
func newTestClient() (testClient, error) {
config, err := client.LoadConfig()
if err != nil {
return testClient{}, err
}
f := client.NewFactory("e2e", config)
clientGo, err := f.KubeClient()
if err != nil {
return testClient{}, err
}
kb, err := f.KubebuilderClient()
if err != nil {
return testClient{}, err
}
dynamicClient, err := f.DynamicClient()
if err != nil {
return testClient{}, err
}
factory := client.NewDynamicFactory(dynamicClient)
return testClient{
kubebuilder: kb,
clientGo: clientGo,
dynamicFactory: factory,
}, nil
}


@@ -1,3 +1,19 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@@ -13,30 +29,29 @@ import (
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/builder"
)
// EnsureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on.
func EnsureClusterExists(ctx context.Context) error {
// ensureClusterExists returns whether or not a kubernetes cluster exists for tests to be run on.
func ensureClusterExists(ctx context.Context) error {
return exec.CommandContext(ctx, "kubectl", "cluster-info").Run()
}
// CreateNamespace creates a kubernetes namespace
func CreateNamespace(ctx context.Context, client *kubernetes.Clientset, namespace string) error {
// createNamespace creates a kubernetes namespace
func createNamespace(ctx context.Context, client testClient, namespace string) error {
ns := builder.ForNamespace(namespace).Result()
_, err := client.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
_, err := client.clientGo.CoreV1().Namespaces().Create(ctx, ns, metav1.CreateOptions{})
if apierrors.IsAlreadyExists(err) {
return nil
}
return err
}
// WaitForNamespaceDeletion Waits for namespace to be deleted.
func WaitForNamespaceDeletion(interval, timeout time.Duration, client *kubernetes.Clientset, ns string) error {
// waitForNamespaceDeletion waits for namespace to be deleted.
func waitForNamespaceDeletion(interval, timeout time.Duration, client testClient, ns string) error {
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
_, err := client.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{})
_, err := client.clientGo.CoreV1().Namespaces().Get(context.TODO(), ns, metav1.GetOptions{})
if err != nil {
if apierrors.IsNotFound(err) {
return true, nil
@@ -49,7 +64,7 @@ func WaitForNamespaceDeletion(interval, timeout time.Duration, client *kubernete
return err
}
func CreateSecretFromFiles(ctx context.Context, client *kubernetes.Clientset, namespace string, name string, files map[string]string) error {
func createSecretFromFiles(ctx context.Context, client testClient, namespace string, name string, files map[string]string) error {
data := make(map[string][]byte)
for key, filePath := range files {
@@ -62,19 +77,17 @@ func CreateSecretFromFiles(ctx context.Context, client *kubernetes.Clientset, na
}
secret := builder.ForSecret(namespace, name).Data(data).Result()
_, err := client.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
_, err := client.clientGo.CoreV1().Secrets(namespace).Create(ctx, secret, metav1.CreateOptions{})
return err
}
/*
Waits until all of the pods have gone to PodRunning state
*/
func WaitForPods(ctx context.Context, client *kubernetes.Clientset, namespace string, pods []string) error {
// waitForPods waits until all of the pods have gone to PodRunning state
func waitForPods(ctx context.Context, client testClient, namespace string, pods []string) error {
timeout := 10 * time.Minute
interval := 5 * time.Second
err := wait.PollImmediate(interval, timeout, func() (bool, error) {
for _, podName := range pods {
checkPod, err := client.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
checkPod, err := client.clientGo.CoreV1().Pods(namespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return false, errors.WithMessage(err, fmt.Sprintf("Failed to verify pod %s/%s is %s", namespace, podName, corev1api.PodRunning))
}


@@ -1,3 +1,19 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (


@@ -1,3 +1,19 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@@ -5,24 +21,16 @@ import (
"encoding/json"
"fmt"
"os/exec"
"regexp"
"strconv"
"strings"
"time"
"github.com/google/uuid"
"github.com/vmware-tanzu/velero/pkg/util/kube"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"github.com/pkg/errors"
corev1api "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"github.com/vmware-tanzu/velero/pkg/builder"
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
@@ -30,156 +38,142 @@ import (
var _ = Describe("[APIGroup] Velero tests with various CRD API group versions", func() {
var (
resource, group string
certMgrCRD map[string]string
client *kubernetes.Clientset
extensionsClient *apiextensionsclient.Clientset
err error
ctx = context.Background()
resource, group string
err error
ctx = context.Background()
)
client, err := newTestClient()
Expect(err).To(Succeed(), "Failed to instantiate cluster client for group version tests")
BeforeEach(func() {
resource = "rockbands"
group = "music.example.io"
certMgrCRD = map[string]string{
"url": "testdata/enable_api_group_versions/cert-manager.yaml",
"namespace": "cert-manager",
}
client, extensionsClient, err = kube.GetClusterClient() // Currently we ignore the API extensions client
Expect(err).NotTo(HaveOccurred())
err = InstallCRD(ctx, certMgrCRD["url"], certMgrCRD["namespace"])
Expect(err).NotTo(HaveOccurred())
uuidgen, err = uuid.NewRandom()
Expect(err).NotTo(HaveOccurred())
// TODO: install Velero once for the test suite once feature flag is
// removed and velero installation becomes the same as other e2e tests.
if installVelero {
err = veleroInstall(
context.Background(),
veleroImage,
veleroNamespace,
cloudProvider,
objectStoreProvider,
false,
cloudCredentialsFile,
bslBucket,
bslPrefix,
bslConfig,
vslConfig,
"EnableAPIGroupVersions", // TODO: remove when feature flag is removed
)
Expect(err).NotTo(HaveOccurred())
}
})
AfterEach(func() {
cmd := exec.CommandContext(ctx, "kubectl", "delete", "namespace", "music-system")
_, _, _ = veleroexec.RunCommand(cmd)
fmt.Printf("Clean up resource: kubectl delete crd %s.%s\n", resource, group)
cmd := exec.CommandContext(ctx, "kubectl", "delete", "crd", resource+"."+group)
_, stderr, err := veleroexec.RunCommand(cmd)
if strings.Contains(stderr, "NotFound") {
fmt.Printf("Ignore error: %v\n", stderr)
err = nil
}
Expect(err).NotTo(HaveOccurred())
cmd = exec.CommandContext(ctx, "kubectl", "delete", "crd", "rockbands.music.example.io")
_, _, _ = veleroexec.RunCommand(cmd)
err = veleroUninstall(ctx, client.kubebuilder, installVelero, veleroNamespace)
Expect(err).NotTo(HaveOccurred())
_ = DeleteCRD(ctx, certMgrCRD["url"], certMgrCRD["namespace"])
})
Context("When EnableAPIGroupVersions flag is set", func() {
It("Should back up API group version and restore by version priority", func() {
Expect(RunEnableAPIGroupVersionsTests(
Expect(runEnableAPIGroupVersionsTests(
ctx,
client,
resource,
group,
client,
extensionsClient,
)).To(Succeed(), "Failed to successfully backup and restore multiple API Groups")
})
})
})
func RunEnableAPIGroupVersionsTests(ctx context.Context, resource, group string, client *kubernetes.Clientset,
extensionsClient *apiextensionsclient.Clientset) error {
func runEnableAPIGroupVersionsTests(ctx context.Context, client testClient, resource, group string) error {
tests := []struct {
name string
namespaces []string
srcCRD map[string]string
srcCrdYaml string
srcCRs map[string]string
tgtCRD map[string]string
tgtCrdYaml string
tgtVer string
cm *corev1api.ConfigMap
gvs map[string][]string
want map[string]map[string]string
}{
{
name: "Target and source cluster preferred versions match; Preferred version v1 is restored (Priority 1, Case A).",
srcCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-a-source.yaml",
"namespace": "music-system",
},
name: "Target and source cluster preferred versions match; Preferred version v1 is restored (Priority 1, Case A).",
srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml",
srcCRs: map[string]string{
"v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml",
"v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml",
},
tgtCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-a-target.yaml",
"namespace": "music-system",
},
tgtVer: "v1",
cm: nil,
tgtCrdYaml: "testdata/enable_api_group_versions/case-a-target.yaml",
tgtVer: "v1",
cm: nil,
want: map[string]map[string]string{
"annotations": {
"rockbands.music.example.io/originalVersion": "v1",
},
"specs": {
"leadSinger": "John Lennon",
"genre": "60s rock",
},
},
},
{
name: "Latest common non-preferred supported version v2beta2 is restored (Priority 3, Case D).",
srcCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml",
"namespace": "music-system",
},
name: "Latest common non-preferred supported version v2beta2 is restored (Priority 3, Case D).",
srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml",
srcCRs: map[string]string{
"v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml",
"v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml",
"v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml",
},
tgtCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml",
"namespace": "music-system",
},
tgtVer: "v2beta2",
cm: nil,
tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml",
tgtVer: "v2beta2",
cm: nil,
want: map[string]map[string]string{
"annotations": {
"rockbands.music.example.io/originalVersion": "v2beta2",
},
"specs": {
"leadSinger": "John Lennon",
"leadGuitar": "George Harrison",
"drummer": "Ringo Starr",
"genre": "60s rock",
},
},
},
{
name: "No common supported versions means no rockbands custom resource is restored.",
srcCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-a-source.yaml",
"namespace": "music-system",
},
name: "No common supported versions means no rockbands custom resource is restored.",
srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml",
srcCRs: map[string]string{
"v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml",
"v1alpha1": "testdata/enable_api_group_versions/music_v1alpha1_rockband.yaml",
},
tgtCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-b-target-manually-added-mutations.yaml",
"namespace": "music-system",
},
tgtVer: "",
cm: nil,
want: nil,
tgtCrdYaml: "testdata/enable_api_group_versions/case-b-target-manually-added-mutations.yaml",
tgtVer: "",
cm: nil,
want: nil,
},
{
name: "User config map overrides Priority 3, Case D and restores v2beta1",
srcCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml",
"namespace": "music-system",
},
name: "User config map overrides Priority 3, Case D and restores v2beta1",
srcCrdYaml: "testdata/enable_api_group_versions/case-b-source-manually-added-mutations.yaml",
srcCRs: map[string]string{
"v2beta2": "testdata/enable_api_group_versions/music_v2beta2_rockband.yaml",
"v2beta1": "testdata/enable_api_group_versions/music_v2beta1_rockband.yaml",
"v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml",
},
tgtCRD: map[string]string{
"url": "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml",
"namespace": "music-system",
},
tgtVer: "v2beta1",
tgtCrdYaml: "testdata/enable_api_group_versions/case-d-target-manually-added-mutations.yaml",
tgtVer: "v2beta1",
cm: builder.ForConfigMap(veleroNamespace, "enableapigroupversions").Data(
"restoreResourcesVersionPriority",
`rockbands.music.example.io=v2beta1,v2beta2,v2`,
@@ -189,101 +183,119 @@ func RunEnableAPIGroupVersionsTests(ctx context.Context, resource, group string,
"rockbands.music.example.io/originalVersion": "v2beta1",
},
"specs": {
"leadSinger": "John Lennon",
"leadGuitar": "George Harrison",
"genre": "60s rock",
"genre": "60s rock",
},
},
},
{
name: "Restore successful when CRD doesn't (yet) exist in target",
srcCrdYaml: "testdata/enable_api_group_versions/case-a-source.yaml",
srcCRs: map[string]string{
"v1": "testdata/enable_api_group_versions/music_v1_rockband.yaml",
},
tgtCrdYaml: "",
tgtVer: "v1",
cm: nil,
want: map[string]map[string]string{
"annotations": {
"rockbands.music.example.io/originalVersion": "v1",
},
"specs": {
"genre": "60s rock",
},
},
},
}
for i, tc := range tests {
fmt.Printf("\n====== Test Case %d ======\n", i)
fmt.Printf("\n====== Test Case %d: %s ======\n", i, tc.name)
err := InstallCRD(ctx, tc.srcCRD["url"], tc.srcCRD["namespace"])
err := installCRD(ctx, tc.srcCrdYaml)
if err != nil {
return errors.Wrap(err, "installing music-system CRD for source cluster")
return errors.Wrap(err, "install music-system CRD on source cluster")
}
for version, cr := range tc.srcCRs {
ns := resource + "-src-" + version
if err := CreateNamespace(ctx, client, ns); err != nil {
return errors.Wrapf(err, "creating %s namespace", ns)
if err := createNamespace(ctx, client, ns); err != nil {
return errors.Wrapf(err, "create %s namespace", ns)
}
if err := InstallCR(ctx, cr, ns); err != nil {
return errors.Wrapf(err, "installing %s custom resource on source cluster namespace %s", cr, ns)
if err := installCR(ctx, cr, ns); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "install %s custom resource on source cluster in namespace %s", cr, ns)
}
tc.namespaces = append(tc.namespaces, ns)
}
// TODO - Velero needs to be installed AFTER CRDs are installed because of https://github.com/vmware-tanzu/velero/issues/3471
// Once that issue is fixed, we should install Velero once for the test suite
if installVelero {
VeleroInstall(context.Background(), veleroImage, veleroNamespace, cloudProvider, objectStoreProvider, false,
cloudCredentialsFile, bslBucket, bslPrefix, bslConfig, vslConfig,
"EnableAPIGroupVersions" /* TODO - remove this when the feature flag is removed */)
fmt.Println("Sleep 20s to wait for Velero to stabilize after install.")
time.Sleep(time.Second * 20)
// Restart Velero pods in order to recognize music-system CRD right away
// instead of waiting for discovery helper to refresh. See
// https://github.com/vmware-tanzu/velero/issues/3471.
if err := restartPods(ctx, veleroNamespace); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "restart Velero pods")
}
backup := "backup-rockbands-" + uuidgen.String() + "-" + strconv.Itoa(i)
namespacesStr := strings.Join(tc.namespaces, ",")
err = VeleroBackupNamespace(ctx, veleroCLI, veleroNamespace, backup, namespacesStr, "", false)
err = veleroBackupNamespace(ctx, veleroCLI, veleroNamespace, backup, namespacesStr, "", false)
if err != nil {
VeleroBackupLogs(ctx, veleroCLI, veleroNamespace, backup)
return errors.Wrapf(err, "backing up %s namespaces on source cluster", namespacesStr)
veleroBackupLogs(ctx, veleroCLI, veleroNamespace, backup)
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "back up %s namespaces on source cluster", namespacesStr)
}
// Delete music-system CRD and controllers installed on source cluster.
if err := DeleteCRD(ctx, tc.srcCRD["url"], tc.srcCRD["namespace"]); err != nil {
return errors.Wrapf(err, "deleting music-system CRD from source cluster")
if err := deleteCRD(ctx, tc.srcCrdYaml); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "delete music-system CRD from source cluster")
}
for _, ns := range tc.namespaces {
if err := client.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "deleting %s namespace from source cluster", ns)
}
if err := WaitNamespaceDelete(ctx, ns); err != nil {
return errors.Wrapf(err, "deleting %s namespace from source cluster", ns)
if err := deleteNamespace(ctx, ns); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "delete %s namespace from source cluster", ns)
}
}
// Install music-system CRD for target cluster.
if err := InstallCRD(ctx, tc.tgtCRD["url"], tc.tgtCRD["namespace"]); err != nil {
return errors.Wrapf(err, "installing music-system CRD for target cluster")
if tc.tgtCrdYaml != "" {
if err := installCRD(ctx, tc.tgtCrdYaml); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "install music-system CRD on target cluster")
}
}
// Apply config map if there is one.
if tc.cm != nil {
_, err := client.CoreV1().ConfigMaps(veleroNamespace).Create(ctx, tc.cm, metav1.CreateOptions{})
_, err := client.clientGo.CoreV1().ConfigMaps(veleroNamespace).Create(ctx, tc.cm, metav1.CreateOptions{})
if err != nil {
return errors.Wrap(err, "creating config map with user version priorities")
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrap(err, "create config map with user version priorities")
}
}
// Reset Velero to recognize music-system CRD.
if err := RestartPods(ctx, veleroNamespace); err != nil {
return errors.Wrapf(err, "restarting Velero pods")
if err := restartPods(ctx, veleroNamespace); err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "restart Velero pods")
}
fmt.Println("Sleep 20s to wait for Velero to stabilize after restart.")
time.Sleep(time.Second * 20)
// Restore rockbands namespace.
// Restore rockbands namespaces.
restore := "restore-rockbands-" + uuidgen.String() + "-" + strconv.Itoa(i)
if tc.want != nil {
if err := VeleroRestore(ctx, veleroCLI, veleroNamespace, restore, backup); err != nil {
VeleroRestoreLogs(ctx, veleroCLI, veleroNamespace, restore)
return errors.Wrapf(err, "restoring %s namespaces on target cluster", namespacesStr)
if err := veleroRestore(ctx, veleroCLI, veleroNamespace, restore, backup); err != nil {
veleroRestoreLogs(ctx, veleroCLI, veleroNamespace, restore)
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(err, "restore %s namespaces on target cluster", namespacesStr)
}
annoSpec, err := resourceInfo(ctx, group, tc.tgtVer, resource)
if err != nil {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.Wrapf(
err,
"get annotation and spec from %s.%s/%s object",
@@ -300,6 +312,7 @@ func RunEnableAPIGroupVersionsTests(ctx context.Context, resource, group string,
annoSpec["annotations"],
tc.want["annotations"],
)
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.New(msg)
}
@@ -310,180 +323,104 @@ func RunEnableAPIGroupVersionsTests(ctx context.Context, resource, group string,
annoSpec["specs"],
tc.want["specs"],
)
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.New(msg)
}
} else {
// No custom resource should have been restored, so the restore is expected
// to end in a PartiallyFailed phase rather than Completed.
err := VeleroRestore(ctx, veleroCLI, veleroNamespace, restore, backup)
err := veleroRestore(ctx, veleroCLI, veleroNamespace, restore, backup)
if err.Error() != "Unexpected restore phase got PartiallyFailed, expecting Completed" {
deleteNamespacesOnErr(ctx, tc.namespaces)
return errors.New("expected error but not none")
}
}
// Delete namespaces created for CRs
// Clean up.
for _, ns := range tc.namespaces {
fmt.Println("Delete namespace", ns)
_ = client.CoreV1().Namespaces().Delete(ctx, ns, metav1.DeleteOptions{})
_ = WaitNamespaceDelete(ctx, ns)
deleteNamespace(ctx, ns)
}
// Delete source cluster music-system CRD
_ = DeleteCRD(
ctx,
tc.srcCRD["url"],
tc.srcCRD["namespace"],
)
// Delete target cluster music-system CRD
_ = DeleteCRD(
ctx,
tc.tgtCRD["url"],
tc.srcCRD["namespace"],
)
// Uninstall Velero
if installVelero {
err = VeleroUninstall(ctx, client, extensionsClient, veleroNamespace)
if err != nil {
return err
}
_ = deleteCRD(ctx, tc.srcCrdYaml)
if tc.tgtCrdYaml != "" {
_ = deleteCRD(ctx, tc.tgtCrdYaml)
}
}
return nil
}
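// Side note (illustration only, not part of this change): the version-priority
// override exercised by the last two table entries is just a ConfigMap in the
// Velero namespace. A hypothetical helper, reusing this file's corev1api/metav1
// imports and client-go's kubernetes.Interface, with the name and data key
// taken from the builder call above:
func createVersionPriorityOverride(ctx context.Context, c kubernetes.Interface, veleroNS string) error {
	cm := &corev1api.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "enableapigroupversions", Namespace: veleroNS},
		Data: map[string]string{
			// Restore rockbands preferring v2beta1, then v2beta2, then v2.
			"restoreResourcesVersionPriority": "rockbands.music.example.io=v2beta1,v2beta2,v2",
		},
	}
	_, err := c.CoreV1().ConfigMaps(veleroNS).Create(ctx, cm, metav1.CreateOptions{})
	return err
}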
func installVeleroForAPIGroups(ctx context.Context) error {
if err := EnsureClusterExists(ctx); err != nil {
return errors.Wrap(err, "check cluster exists")
}
func installCRD(ctx context.Context, yaml string) error {
fmt.Printf("Install CRD with %s.\n", yaml)
cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", yaml)
// Pass global variables to option parameters.
options, err := GetProviderVeleroInstallOptions(
cloudProvider,
cloudCredentialsFile,
bslBucket,
bslPrefix,
bslConfig,
vslConfig,
getProviderPlugins(cloudProvider),
"EnableAPIGroupVersions",
)
if err != nil {
return errors.Wrap(err, "get velero install options")
}
options.UseRestic = false
options.Features = "EnableAPIGroupVersions"
options.Image = veleroImage
if err := InstallVeleroServer(options); err != nil {
return errors.Wrap(err, "install velero server")
}
return nil
}
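// For manual runs, the programmatic install above is roughly the CLI's
// `velero install --features=EnableAPIGroupVersions` (restic left disabled);
// the provider, bucket, and prefix come from the same package-level globals.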
func InstallCRD(ctx context.Context, crdFile, ns string) error {
fmt.Printf("Install CRD %s.\n", crdFile)
cmd := exec.CommandContext(ctx, "kubectl", "apply", "-f", crdFile)
_, stderr, err := veleroexec.RunCommand(cmd)
if err != nil {
return errors.Wrap(err, stderr)
}
fmt.Println("Wait for CRD to be ready.")
if err := WaitForPodContainers(ctx, ns); err != nil {
return err
}
return err
return nil
}
// WaitForPodContainers polls the pods and container statuses in a namespace.
// A pod counts as ready only when its ready-to-total container ratio is 1
// (e.g. "2/2 Running"); the wait succeeds once every pod reports a full ratio.
func WaitForPodContainers(ctx context.Context, ns string) error {
err := wait.Poll(3*time.Second, 4*time.Minute, func() (bool, error) {
cmd := exec.CommandContext(ctx, "kubectl", "get", "pods", "-n", ns)
stdout, stderr, err := veleroexec.RunCommand(cmd)
if err != nil {
return false, errors.Wrap(err, stderr)
}
re := regexp.MustCompile(`(\d+)/(\d+)\s+Running`)
// Default allRunning needs to be false for when no match is found.
var allRunning bool
for i, v := range re.FindAllStringSubmatch(stdout, -1) {
if i == 0 {
allRunning = true
}
allRunning = v[1] == v[2] && allRunning
}
return allRunning, nil
})
if err == nil {
fmt.Println("Sleep for 20s for cluster to stabilize.")
time.Sleep(time.Second * 20)
}
return err
}
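// Illustration only (hypothetical helper, not part of this change): the poll
// above reduces to scanning `kubectl get pods` output such as
//
//	NAME                        READY   STATUS    RESTARTS   AGE
//	music-controller-manager    2/2     Running   0          1m
//
// and requiring the ready count to equal the total for every matched pod:
func allContainersReady(kubectlGetPodsOutput string) bool {
	re := regexp.MustCompile(`(\d+)/(\d+)\s+Running`)
	matches := re.FindAllStringSubmatch(kubectlGetPodsOutput, -1)
	if len(matches) == 0 {
		return false // no Running pods yet
	}
	for _, m := range matches {
		if m[1] != m[2] { // e.g. "1/2" means a container is still starting
			return false
		}
	}
	return true
}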
func DeleteCRD(ctx context.Context, crdFile, ns string) error {
fmt.Println("Delete CRD", crdFile)
cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", crdFile, "--wait")
func deleteCRD(ctx context.Context, yaml string) error {
fmt.Println("Delete CRD", yaml)
cmd := exec.CommandContext(ctx, "kubectl", "delete", "-f", yaml, "--wait")
_, stderr, err := veleroexec.RunCommand(cmd)
if strings.Contains(stderr, "not found") {
return nil
}
if err != nil {
return errors.Wrap(err, stderr)
}
err = wait.Poll(1*time.Second, 3*time.Minute, func() (bool, error) {
cmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", ns)
stdout, stderr, err := veleroexec.RunCommand(cmd)
if strings.Contains(stderr, "not found") {
return true, nil
}
if err != nil {
return false, errors.Wrap(err, stderr)
}
re := regexp.MustCompile(ns)
return re.MatchString(stdout), nil
})
return err
return nil
}
func RestartPods(ctx context.Context, ns string) error {
func restartPods(ctx context.Context, ns string) error {
fmt.Printf("Restart pods in %s namespace.\n", ns)
cmd := exec.CommandContext(ctx, "kubectl", "delete", "pod", "--all", "-n", ns, "--wait=true")
cmd := exec.CommandContext(ctx, "kubectl", "delete", "pod", "--all", "-n", ns)
_, _, err := veleroexec.RunCommand(cmd)
_, stderr, err := veleroexec.RunCommand(cmd)
if strings.Contains(stderr, "not found") {
return nil
}
if err != nil {
return errors.Wrap(err, stderr)
}
return nil
}
if err == nil {
fmt.Println("Wait for pods to be ready.")
if err := WaitForPodContainers(ctx, ns); err != nil {
return err
}
func deleteNamespace(ctx context.Context, ns string) error {
fmt.Println("Delete namespace", ns)
cmd := exec.CommandContext(ctx, "kubectl", "delete", "ns", ns, "--wait")
_, stderr, err := veleroexec.RunCommand(cmd)
if strings.Contains(stderr, "not found") {
return nil
}
if err != nil {
return errors.Wrap(err, stderr)
}
return err
return nil
}
func InstallCR(ctx context.Context, crFile, ns string) error {
// deleteNamespacesOnErr cleans up the namespaces created for a test case
// after an error interrupts that test case.
func deleteNamespacesOnErr(ctx context.Context, namespaces []string) {
if len(namespaces) > 0 {
fmt.Println("An error has occurred. Cleaning up test case namespaces.")
}
for _, ns := range namespaces {
deleteNamespace(ctx, ns)
}
}
func installCR(ctx context.Context, crFile, ns string) error {
retries := 5
var stderr string
var err error
@@ -503,22 +440,6 @@ func InstallCR(ctx context.Context, crFile, ns string) error {
return errors.Wrap(err, stderr)
}
func WaitNamespaceDelete(ctx context.Context, ns string) error {
err := wait.Poll(1*time.Second, 3*time.Minute, func() (bool, error) {
cmd := exec.CommandContext(ctx, "kubectl", "get", "namespace", ns)
stdout, stderr, err := veleroexec.RunCommand(cmd)
if err != nil {
return false, errors.Wrap(err, stderr)
}
re := regexp.MustCompile(ns)
return re.MatchString(stdout), nil
})
return err
}
func resourceInfo(ctx context.Context, g, v, r string) (map[string]map[string]string, error) {
rvg := r + "." + v + "." + g
ns := r + "-src-" + v

View File

@@ -1,3 +1,19 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@@ -8,7 +24,6 @@ import (
"github.com/pkg/errors"
"golang.org/x/net/context"
"k8s.io/client-go/kubernetes"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -77,15 +92,15 @@ func verifyData(ctx context.Context, namespace string, levels int, filesPerLevel
return nil
}
// RunKibishiiTests runs kibishii tests on the provider.
func RunKibishiiTests(client *kubernetes.Clientset, providerName, veleroCLI, veleroNamespace, backupName, restoreName, backupLocation string,
// runKibishiiTests runs kibishii tests on the provider.
func runKibishiiTests(client testClient, providerName, veleroCLI, veleroNamespace, backupName, restoreName, backupLocation string,
useVolumeSnapshots bool) error {
fiveMinTimeout, _ := context.WithTimeout(context.Background(), 5*time.Minute)
oneHourTimeout, _ := context.WithTimeout(context.Background(), time.Minute*60)
timeout := 10 * time.Minute
interval := 5 * time.Second
if err := CreateNamespace(fiveMinTimeout, client, kibishiiNamespace); err != nil {
if err := createNamespace(fiveMinTimeout, client, kibishiiNamespace); err != nil {
return errors.Wrapf(err, "Failed to create namespace %s to install Kibishii workload", kibishiiNamespace)
}
@@ -104,8 +119,8 @@ func RunKibishiiTests(client *kubernetes.Clientset, providerName, veleroCLI, vel
return errors.Wrap(err, "Failed to generate data")
}
if err := VeleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, backupName, kibishiiNamespace, backupLocation, useVolumeSnapshots); err != nil {
VeleroBackupLogs(fiveMinTimeout, veleroCLI, veleroNamespace, backupName)
if err := veleroBackupNamespace(oneHourTimeout, veleroCLI, veleroNamespace, backupName, kibishiiNamespace, backupLocation, useVolumeSnapshots); err != nil {
veleroBackupLogs(fiveMinTimeout, veleroCLI, veleroNamespace, backupName)
return errors.Wrapf(err, "Failed to backup kibishii namespace %s", kibishiiNamespace)
}
@@ -118,17 +133,17 @@ func RunKibishiiTests(client *kubernetes.Clientset, providerName, veleroCLI, vel
}
}
fmt.Printf("Simulating a disaster by removing namespace %s\n", kibishiiNamespace)
if err := client.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
if err := client.clientGo.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
return errors.Wrap(err, "Failed to simulate a disaster")
}
// wait for ns delete
err := WaitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace)
err := waitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace)
if err != nil {
return errors.Wrapf(err, "Failed to wait for deletion of namespace %s", kibishiiNamespace)
}
if err := VeleroRestore(oneHourTimeout, veleroCLI, veleroNamespace, restoreName, backupName); err != nil {
VeleroRestoreLogs(fiveMinTimeout, veleroCLI, veleroNamespace, restoreName)
if err := veleroRestore(oneHourTimeout, veleroCLI, veleroNamespace, restoreName, backupName); err != nil {
veleroRestoreLogs(fiveMinTimeout, veleroCLI, veleroNamespace, restoreName)
return errors.Wrapf(err, "Restore %s failed from backup %s", restoreName, backupName)
}
@@ -145,17 +160,17 @@ func RunKibishiiTests(client *kubernetes.Clientset, providerName, veleroCLI, vel
return errors.Wrap(err, "Failed to verify data generated by kibishii")
}
if err := client.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed to cleanup %s wrokload namespace", kibishiiNamespace)
if err := client.clientGo.CoreV1().Namespaces().Delete(oneHourTimeout, kibishiiNamespace, metav1.DeleteOptions{}); err != nil {
return errors.Wrapf(err, "Failed to cleanup %s workload namespace", kibishiiNamespace)
}
// wait for ns delete
if err = WaitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace); err != nil {
if err = waitForNamespaceDeletion(interval, timeout, client, kibishiiNamespace); err != nil {
return errors.Wrapf(err, "Failed to wait for deletion of namespace %s", kibishiiNamespace)
}
fmt.Printf("kibishii test completed successfully\n")
return nil
}
func waitForKibishiiPods(ctx context.Context, client *kubernetes.Clientset, kibishiiNamespace string) error {
return WaitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
func waitForKibishiiPods(ctx context.Context, client testClient, kibishiiNamespace string) error {
return waitForPods(ctx, client, kibishiiNamespace, []string{"jump-pad", "etcd0", "etcd1", "etcd2", "kibishii-deployment-0", "kibishii-deployment-1"})
}
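// For manual debugging, the backup/restore helpers above roughly correspond to
// the standard Velero CLI (names below are the test's own variables, not flags
// this change adds):
//
//	velero backup create <backupName> --include-namespaces <kibishiiNamespace> --wait
//	velero restore create <restoreName> --from-backup <backupName> --wait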

View File

@@ -4,18 +4,12 @@ This directory contains Kubernetes manifests that are used for the enable API gr
## Documentation
Read more about cert-manager in the [Jet Stack repo](https://github.com/jetstack/cert-manager/blob/master/README.md).
Read more about the music-system custom resource definitions and rockbands custom resources created for Velero tests at [@brito-rafa's repo](https://github.com/brito-rafa/k8s-webhooks/blob/master/examples-for-projectvelero/README.md).
## Reference
These manifests, listed below, come from two different sources: github.com/jetstack/cert-manager and github.com/brito-rafa/k8s-webhooks:
cert-manager.yaml
- source: https://github.com/jetstack/cert-manager/releases/download/v1.0.3/cert-manager.yaml
- license: https://github.com/jetstack/cert-manager/blob/master/LICENSE
These manifests, listed below, come from github.com/brito-rafa/k8s-webhooks:
case-a-source.yaml

View File

@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,18 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v1
- v1alpha1
group: music.example.io
names:
kind: RockBand
@@ -33,363 +14,77 @@ spec:
singular: rockband
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- name: v1alpha1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
required:
- lastPlayed
type: object
type: object
served: true
storage: false
- name: v1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
type: string
kind:
description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- name: v1alpha1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
type: string
kind:
description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
required:
- lastPlayed
type: object
type: object
served: true
storage: false
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-a-source-v0.1
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v1-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands

View File

@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,18 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v1
- v1alpha1
group: music.example.io
names:
kind: RockBand
@@ -33,367 +14,81 @@ spec:
singular: rockband
scope: Namespaced
versions:
- name: v1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- name: v2beta1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadGuitar:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
required:
- lastPlayed
type: object
type: object
served: true
storage: false
- name: v1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
type: string
kind:
description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
type: object
type: object
served: true
storage: true
subresources:
status: {}
- name: v2beta1
schema:
openAPIV3Schema:
description: RockBand is the Schema for the rockbands API
properties:
apiVersion:
description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources"
type: string
kind:
description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
type: string
metadata:
type: object
spec:
description: RockBandSpec defines the desired state of RockBand
properties:
genre:
type: string
leadGuitar:
type: string
leadSinger:
type: string
numberComponents:
format: int32
type: integer
type: object
status:
description: RockBandStatus defines the observed state of RockBand
properties:
lastPlayed:
type: string
required:
- lastPlayed
type: object
type: object
served: true
storage: false
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-a-target-v0.2
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta1
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v1-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands

View File

@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,19 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v1
- v2beta1
- v2beta2
group: music.example.io
names:
kind: RockBand
@@ -151,325 +131,3 @@ status:
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-b-source-v0.1
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta1
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands

View File

@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,19 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v2beta2
- v2beta1
- v1
group: music.example.io
names:
kind: RockBand
@@ -116,307 +96,3 @@ status:
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-b-target-v0.1
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta1
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands

View File

@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,18 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v2
- v1
group: music.example.io
names:
kind: RockBand
@@ -115,307 +96,3 @@ status:
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-c-target-v0.1
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v1-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v1
operations:
- CREATE
- UPDATE
resources:
- rockbands
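These manifests also wire a single conversion endpoint (`path: /convert`) into the CRD, which is how the API server translates stored `RockBand` objects between the served versions. With controller-runtime this is conventionally a hub-and-spoke model: one version implements `conversion.Hub`, the others implement `ConvertTo`/`ConvertFrom`. The sketch below assumes `v1` is the hub (the manifest does not say which version is stored) and maps only the two fields visible in every sample; the import path is illustrative.

```go
// Hub-and-spoke conversion sketch backing the single /convert endpoint in the
// CRD above. Assumptions: v1 is the hub (storage) version, and the field
// mapping covers only genre and numberComponents for brevity.
package v2

import (
	"sigs.k8s.io/controller-runtime/pkg/conversion"

	v1 "example.com/music/api/v1" // illustrative module path
)

// ConvertTo converts this v2 RockBand to the hub version (v1).
func (src *RockBand) ConvertTo(dstRaw conversion.Hub) error {
	dst := dstRaw.(*v1.RockBand)
	dst.ObjectMeta = src.ObjectMeta
	dst.Spec.Genre = src.Spec.Genre
	dst.Spec.NumberComponents = src.Spec.NumberComponents
	return nil
}

// ConvertFrom converts from the hub version (v1) into this v2 RockBand.
func (dst *RockBand) ConvertFrom(srcRaw conversion.Hub) error {
	src := srcRaw.(*v1.RockBand)
	dst.ObjectMeta = src.ObjectMeta
	dst.Spec.Genre = src.Spec.Genre
	dst.Spec.NumberComponents = src.Spec.NumberComponents
	return nil
}

// On the hub side, v1.RockBand only needs: func (*RockBand) Hub() {}
```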


@@ -1,10 +1,3 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
control-plane: controller-manager
name: music-system
---
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
@@ -13,20 +6,6 @@ metadata:
controller-gen.kubebuilder.io/version: v0.2.5
name: rockbands.music.example.io
spec:
conversion:
strategy: Webhook
webhook:
clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /convert
conversionReviewVersions:
- v2
- v2beta2
- v2beta1
- v1
group: music.example.io
names:
kind: RockBand
@@ -158,325 +137,3 @@ status:
plural: ""
conditions: []
storedVersions: []
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: music-leader-election-role
namespace: music-system
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
- update
- patch
- delete
- apiGroups:
- ""
resources:
- configmaps/status
verbs:
- get
- update
- patch
- apiGroups:
- ""
resources:
- events
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
creationTimestamp: null
name: music-manager-role
rules:
- apiGroups:
- music.example.io
resources:
- rockbands
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- music.example.io
resources:
- rockbands/status
verbs:
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: music-proxy-role
rules:
- apiGroups:
- authentication.k8s.io
resources:
- tokenreviews
verbs:
- create
- apiGroups:
- authorization.k8s.io
resources:
- subjectaccessreviews
verbs:
- create
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: music-metrics-reader
rules:
- nonResourceURLs:
- /metrics
verbs:
- get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: music-leader-election-rolebinding
namespace: music-system
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: music-leader-election-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-manager-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-manager-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: music-proxy-rolebinding
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: music-proxy-role
subjects:
- kind: ServiceAccount
name: default
namespace: music-system
---
apiVersion: v1
kind: Service
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager-metrics-service
namespace: music-system
spec:
ports:
- name: https
port: 8443
targetPort: https
selector:
control-plane: controller-manager
---
apiVersion: v1
kind: Service
metadata:
name: music-webhook-service
namespace: music-system
spec:
ports:
- port: 443
targetPort: 9443
selector:
control-plane: controller-manager
---
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
control-plane: controller-manager
name: music-controller-manager
namespace: music-system
spec:
replicas: 1
selector:
matchLabels:
control-plane: controller-manager
template:
metadata:
labels:
control-plane: controller-manager
spec:
containers:
- args:
- --secure-listen-address=0.0.0.0:8443
- --upstream=http://127.0.0.1:8080/
- --logtostderr=true
- --v=10
image: gcr.io/kubebuilder/kube-rbac-proxy:v0.5.0
name: kube-rbac-proxy
ports:
- containerPort: 8443
name: https
- args:
- --metrics-addr=127.0.0.1:8080
- --enable-leader-election
command:
- /manager
image: quay.io/brito_rafa/music-controller:case-d-target-v0.1
name: manager
ports:
- containerPort: 9443
name: webhook-server
protocol: TCP
resources:
limits:
cpu: 100m
memory: 30Mi
requests:
cpu: 100m
memory: 20Mi
volumeMounts:
- mountPath: /tmp/k8s-webhook-server/serving-certs
name: cert
readOnly: true
terminationGracePeriodSeconds: 10
volumes:
- name: cert
secret:
defaultMode: 420
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Certificate
metadata:
name: music-serving-cert
namespace: music-system
spec:
dnsNames:
- music-webhook-service.music-system.svc
- music-webhook-service.music-system.svc.cluster.local
issuerRef:
kind: Issuer
name: music-selfsigned-issuer
secretName: webhook-server-cert
---
apiVersion: cert-manager.io/v1alpha2
kind: Issuer
metadata:
name: music-selfsigned-issuer
namespace: music-system
spec:
selfSigned: {}
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-mutating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2beta1-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta1
operations:
- CREATE
- UPDATE
resources:
- rockbands
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /mutate-music-example-io-v2-rockband
failurePolicy: Fail
name: mrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2
operations:
- CREATE
- UPDATE
resources:
- rockbands
---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: ValidatingWebhookConfiguration
metadata:
annotations:
cert-manager.io/inject-ca-from: music-system/music-serving-cert
name: music-validating-webhook-configuration
webhooks:
- clientConfig:
caBundle: Cg==
service:
name: music-webhook-service
namespace: music-system
path: /validate-music-example-io-v2beta2-rockband
failurePolicy: Fail
name: vrockband.kb.io
rules:
- apiGroups:
- music.example.io
apiVersions:
- v2beta2
operations:
- CREATE
- UPDATE
resources:
- rockbands
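The TLS plumbing is identical in both manifests: the `Certificate`/`Issuer` pair has cert-manager mint a self-signed serving certificate into the `webhook-server-cert` Secret, the Deployment mounts that Secret at `/tmp/k8s-webhook-server/serving-certs` (controller-runtime's default certificate directory), the `music-webhook-service` Service maps port 443 to the webhook server's 9443, and the `cert-manager.io/inject-ca-from` annotation overwrites the `caBundle: Cg==` placeholder (`Cg==` is just a base64-encoded newline) with the real CA. A sketch of the manager setup these manifests assume, with illustrative option values:

```go
// Sketch of the manager wiring implied by the Deployment above; scheme
// registration and webhook setup calls are omitted. Port 9443 and the default
// cert dir /tmp/k8s-webhook-server/serving-certs are controller-runtime
// defaults that line up with the Service targetPort and the Secret mount.
package main

import (
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	mgr, err := ctrl.NewManager(ctrl.GetConfigOrDie(), ctrl.Options{
		Port:               9443,             // webhook-server containerPort
		MetricsBindAddress: "127.0.0.1:8080", // fronted by kube-rbac-proxy on 8443
		LeaderElection:     true,             // --enable-leader-election
		LeaderElectionID:   "music-system",   // illustrative lock name
	})
	if err != nil {
		panic(err)
	}
	if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil {
		panic(err)
	}
}
```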

File diff suppressed because it is too large


@@ -5,7 +5,5 @@ metadata:
annotations:
rockbands.music.example.io/originalVersion: v1
spec:
# Add fields here
genre: '60s rock'
genre: "60s rock"
numberComponents: 4
leadSinger: John
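The `rockbands.music.example.io/originalVersion` annotation on these samples records the API version each object was originally authored in, which survives conversion and lets the tests assert on it later. The webhook code itself is not part of this diff; one plausible shape for it is a kubebuilder `Defaulter` that stamps the annotation at admission time:

```go
// Hypothetical Defaulter that stamps the originalVersion annotation seen in
// the samples; this PR shows only the annotation's effect, not this code.
package v1

const originalVersionAnnotation = "rockbands.music.example.io/originalVersion"

// Default implements webhook.Defaulter and would be served at
// /mutate-music-example-io-v1-rockband.
func (r *RockBand) Default() {
	if r.Annotations == nil {
		r.Annotations = map[string]string{}
	}
	if _, ok := r.Annotations[originalVersionAnnotation]; !ok {
		r.Annotations[originalVersionAnnotation] = "v1"
	}
}
```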


@@ -5,7 +5,5 @@ metadata:
annotations:
rockbands.music.example.io/originalVersion: v1alpha1
spec:
# Add fields here
genre: '60s rock'
genre: "60s rock"
numberComponents: 4


@@ -3,10 +3,5 @@ kind: RockBand
metadata:
name: beatles
spec:
# Add fields here
genre: '60s rock'
genre: "60s rock"
numberComponents: 4
leadSinger: John
leadGuitar: George
drummer: Ringo
bass: Paul
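This fully populated sample implies the shape of the top-level spec. Inferred from the YAML alone (only the field names come from the sample; the package, types, and JSON tags are assumptions), the Go type would look roughly like:

```go
// RockBandSpec as inferred from the beatles sample above.
package v2 // version assumed; the visible hunk does not show apiVersion

type RockBandSpec struct {
	Genre            string `json:"genre,omitempty"`
	NumberComponents int32  `json:"numberComponents,omitempty"`
	LeadSinger       string `json:"leadSinger,omitempty"`
	LeadGuitar       string `json:"leadGuitar,omitempty"`
	Drummer          string `json:"drummer,omitempty"`
	Bass             string `json:"bass,omitempty"`
}
```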


@@ -5,9 +5,5 @@ metadata:
annotations:
rockbands.music.example.io/originalVersion: v2beta1
spec:
# Add fields here
genre: '60s rock'
genre: "60s rock"
numberComponents: 4
leadSinger: John
leadGuitar: George


@@ -5,9 +5,5 @@ metadata:
annotations:
rockbands.music.example.io/originalVersion: v2beta2
spec:
# Add fields here
genre: '60s rock'
genre: "60s rock"
numberComponents: 4
leadSinger: John
leadGuitar: George
drummer: Ringo
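The `/validate-*` endpoints registered earlier gate CREATE and UPDATE on these objects (DELETE is not listed in the webhook rules). A sketch of the `webhook.Validator` shape behind them, with an illustrative rule that is not taken from this repository:

```go
// Sketch of webhook.Validator for the v2beta2 API; the numberComponents
// check is illustrative only.
package v2beta2

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime"
)

// ValidateCreate implements webhook.Validator.
func (r *RockBand) ValidateCreate() error { return r.validate() }

// ValidateUpdate implements webhook.Validator.
func (r *RockBand) ValidateUpdate(old runtime.Object) error { return r.validate() }

// ValidateDelete implements webhook.Validator; DELETE is not in the webhook
// rules above, so it is never invoked here.
func (r *RockBand) ValidateDelete() error { return nil }

func (r *RockBand) validate() error {
	if r.Spec.NumberComponents < 1 {
		return fmt.Errorf("numberComponents must be at least 1, got %d", r.Spec.NumberComponents)
	}
	return nil
}
```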


@@ -1,3 +1,19 @@
/*
Copyright the Velero contributors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package e2e
import (
@@ -12,21 +28,16 @@ import (
"strings"
"time"
"k8s.io/apimachinery/pkg/util/wait"
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
"github.com/pkg/errors"
"k8s.io/client-go/kubernetes"
apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
"k8s.io/apimachinery/pkg/util/wait"
kbclient "sigs.k8s.io/controller-runtime/pkg/client"
velerov1api "github.com/vmware-tanzu/velero/pkg/apis/velero/v1"
"github.com/vmware-tanzu/velero/pkg/client"
cliinstall "github.com/vmware-tanzu/velero/pkg/cmd/cli/install"
"github.com/vmware-tanzu/velero/pkg/cmd/cli/uninstall"
"github.com/vmware-tanzu/velero/pkg/cmd/util/flag"
"github.com/vmware-tanzu/velero/pkg/install"
veleroexec "github.com/vmware-tanzu/velero/pkg/util/exec"
)
func getProviderPlugins(providerName string) []string {
@@ -43,8 +54,8 @@ func getProviderPlugins(providerName string) []string {
}
}
// GetProviderVeleroInstallOptions returns Velero InstallOptions for the provider.
func GetProviderVeleroInstallOptions(
// getProviderVeleroInstallOptions returns Velero InstallOptions for the provider.
func getProviderVeleroInstallOptions(
pluginProvider,
credentialsFile,
objectStoreBucket,
@@ -84,52 +95,44 @@ func GetProviderVeleroInstallOptions(
return io, nil
}
// InstallVeleroServer installs velero in the cluster.
func InstallVeleroServer(io *cliinstall.InstallOptions) error {
config, err := client.LoadConfig()
if err != nil {
return err
}
// installVeleroServer installs velero in the cluster.
func installVeleroServer(io *cliinstall.InstallOptions) error {
vo, err := io.AsVeleroOptions()
if err != nil {
return errors.Wrap(err, "Failed to translate InstallOptions to VeleroOptions for Velero")
}
f := client.NewFactory("e2e", config)
resources, err := install.AllResources(vo)
client, err := newTestClient()
if err != nil {
return errors.Wrap(err, "Failed to install Velero in the cluster")
return errors.Wrap(err, "Failed to instantiate cluster client for installing Velero")
}
dynamicClient, err := f.DynamicClient()
if err != nil {
return err
}
factory := client.NewDynamicFactory(dynamicClient)
errorMsg := "\n\nError installing Velero. Use `kubectl logs deploy/velero -n velero` to check the deploy logs"
err = install.Install(factory, resources, os.Stdout)
resources := install.AllResources(vo)
err = install.Install(client.dynamicFactory, resources, os.Stdout)
if err != nil {
return errors.Wrap(err, errorMsg)
}
fmt.Println("Waiting for Velero deployment to be ready.")
if _, err = install.DeploymentIsReady(factory, io.Namespace); err != nil {
if _, err = install.DeploymentIsReady(client.dynamicFactory, io.Namespace); err != nil {
return errors.Wrap(err, errorMsg)
}
if io.UseRestic {
fmt.Println("Waiting for Velero restic daemonset to be ready.")
if _, err = install.DaemonSetIsReady(factory, io.Namespace); err != nil {
if _, err = install.DaemonSetIsReady(client.dynamicFactory, io.Namespace); err != nil {
return errors.Wrap(err, errorMsg)
}
}
fmt.Printf("Velero is installed and ready to be tested in the %s namespace! ⛵ \n", io.Namespace)
return nil
}
// CheckBackupPhase uses veleroCLI to inspect the phase of a Velero backup.
func CheckBackupPhase(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string,
// checkBackupPhase uses veleroCLI to inspect the phase of a Velero backup.
func checkBackupPhase(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string,
expectedPhase velerov1api.BackupPhase) error {
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "backup", "get", "-o", "json",
backupName)
@@ -172,8 +175,8 @@ func CheckBackupPhase(ctx context.Context, veleroCLI string, veleroNamespace str
return nil
}
// CheckRestorePhase uses veleroCLI to inspect the phase of a Velero restore.
func CheckRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string,
// checkRestorePhase uses veleroCLI to inspect the phase of a Velero restore.
func checkRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string,
expectedPhase velerov1api.RestorePhase) error {
checkCMD := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "restore", "get", "-o", "json",
restoreName)
@@ -216,8 +219,8 @@ func CheckRestorePhase(ctx context.Context, veleroCLI string, veleroNamespace st
return nil
}
// VeleroBackupNamespace uses the veleroCLI to backup a namespace.
func VeleroBackupNamespace(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string, namespace string, backupLocation string,
// veleroBackupNamespace uses the veleroCLI to backup a namespace.
func veleroBackupNamespace(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string, namespace string, backupLocation string,
useVolumeSnapshots bool) error {
args := []string{
"--namespace", veleroNamespace,
@@ -243,13 +246,13 @@ func VeleroBackupNamespace(ctx context.Context, veleroCLI string, veleroNamespac
if err != nil {
return err
}
err = CheckBackupPhase(ctx, veleroCLI, veleroNamespace, backupName, velerov1api.BackupPhaseCompleted)
err = checkBackupPhase(ctx, veleroCLI, veleroNamespace, backupName, velerov1api.BackupPhaseCompleted)
return err
}
// VeleroRestore uses the veleroCLI to restore from a Velero backup.
func VeleroRestore(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string, backupName string) error {
// veleroRestore uses the veleroCLI to restore from a Velero backup.
func veleroRestore(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string, backupName string) error {
restoreCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "create", "restore", restoreName,
"--from-backup", backupName, "--wait")
@@ -260,10 +263,10 @@ func VeleroRestore(ctx context.Context, veleroCLI string, veleroNamespace string
if err != nil {
return err
}
return CheckRestorePhase(ctx, veleroCLI, veleroNamespace, restoreName, velerov1api.RestorePhaseCompleted)
return checkRestorePhase(ctx, veleroCLI, veleroNamespace, restoreName, velerov1api.RestorePhaseCompleted)
}
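With the helpers now unexported, a typical call site inside the e2e suite chains them together. A sketch using only the signatures shown in this diff; the wrapper function, resource names, and timeout are illustrative, and the imports are assumed to come from the file's existing import block:

```go
// backupAndRestoreNginx sketches how the renamed helpers compose; only the
// helper signatures come from this diff, everything else is made up.
func backupAndRestoreNginx(veleroCLI string) error {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	if err := veleroBackupNamespace(ctx, veleroCLI, "velero", "nginx-backup",
		"nginx-example", "default", false); err != nil {
		veleroBackupLogs(ctx, veleroCLI, "velero", "nginx-backup") // dump details on failure
		return err
	}
	return veleroRestore(ctx, veleroCLI, "velero", "nginx-restore", "nginx-backup")
}
```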
func VeleroInstall(ctx context.Context, veleroImage string, veleroNamespace string, cloudProvider string, objectStoreProvider string, useVolumeSnapshots bool,
func veleroInstall(ctx context.Context, veleroImage string, veleroNamespace string, cloudProvider string, objectStoreProvider string, useVolumeSnapshots bool,
cloudCredentialsFile string, bslBucket string, bslPrefix string, bslConfig string, vslConfig string,
features string) error {
@@ -278,6 +281,7 @@ func VeleroInstall(ctx context.Context, veleroImage string, veleroNamespace stri
}
}
// Fetch the plugins for the provider before checking for the object store provider below.
providerPlugins := getProviderPlugins(objectStoreProvider)
// TODO - handle this better
@@ -287,12 +291,16 @@ func VeleroInstall(ctx context.Context, veleroImage string, veleroNamespace stri
// Snapshot location specified
objectStoreProvider = "aws"
}
err := EnsureClusterExists(ctx)
err := ensureClusterExists(ctx)
if err != nil {
return errors.WithMessage(err, "Failed to ensure kubernetes cluster exists")
return errors.WithMessage(err, "Failed to ensure Kubernetes cluster exists")
}
veleroInstallOptions, err := GetProviderVeleroInstallOptions(objectStoreProvider, cloudCredentialsFile, bslBucket,
veleroInstallOptions, err := getProviderVeleroInstallOptions(objectStoreProvider, cloudCredentialsFile, bslBucket,
bslPrefix, bslConfig, vslConfig, providerPlugins, features)
if err != nil {
return errors.WithMessagef(err, "Failed to get Velero InstallOptions for plugin provider %s", objectStoreProvider)
}
if useVolumeSnapshots {
if cloudProvider != "vsphere" {
veleroInstallOptions.UseVolumeSnapshots = true
@@ -302,25 +310,23 @@ func VeleroInstall(ctx context.Context, veleroImage string, veleroNamespace stri
// being an AWS VSL which causes problems)
}
}
if err != nil {
return errors.WithMessagef(err, "Failed to get Velero InstallOptions for plugin provider %s", objectStoreProvider)
}
veleroInstallOptions.UseRestic = !useVolumeSnapshots
veleroInstallOptions.Image = veleroImage
veleroInstallOptions.Namespace = veleroNamespace
err = InstallVeleroServer(veleroInstallOptions)
err = installVeleroServer(veleroInstallOptions)
if err != nil {
return errors.WithMessagef(err, "Failed to install Velero in cluster")
return errors.WithMessagef(err, "Failed to install Velero in the cluster")
}
return nil
}
func VeleroUninstall(ctx context.Context, client *kubernetes.Clientset, extensionsClient *apiextensionsclient.Clientset, veleroNamespace string) error {
return uninstall.Run(ctx, client, extensionsClient, veleroNamespace, true)
func veleroUninstall(ctx context.Context, client kbclient.Client, installVelero bool, veleroNamespace string) error {
return uninstall.Run(ctx, client, veleroNamespace, true)
}
func VeleroBackupLogs(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string) error {
func veleroBackupLogs(ctx context.Context, veleroCLI string, veleroNamespace string, backupName string) error {
describeCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "backup", "describe", backupName)
describeCmd.Stdout = os.Stdout
describeCmd.Stderr = os.Stderr
@@ -338,7 +344,7 @@ func VeleroBackupLogs(ctx context.Context, veleroCLI string, veleroNamespace str
return nil
}
func VeleroRestoreLogs(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string) error {
func veleroRestoreLogs(ctx context.Context, veleroCLI string, veleroNamespace string, restoreName string) error {
describeCmd := exec.CommandContext(ctx, veleroCLI, "--namespace", veleroNamespace, "restore", "describe", restoreName)
describeCmd.Stdout = os.Stdout
describeCmd.Stderr = os.Stderr
@@ -356,7 +362,7 @@ func VeleroRestoreLogs(ctx context.Context, veleroCLI string, veleroNamespace st
return nil
}
func VeleroCreateBackupLocation(ctx context.Context,
func veleroCreateBackupLocation(ctx context.Context,
veleroCLI string,
veleroNamespace string,
name string,
@@ -393,9 +399,9 @@ func VeleroCreateBackupLocation(ctx context.Context,
return bslCreateCmd.Run()
}
// VeleroAddPluginsForProvider determines which plugins need to be installed for a provider and
// veleroAddPluginsForProvider determines which plugins need to be installed for a provider and
// installs them in the current Velero installation, skipping over those that are already installed.
func VeleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNamespace string, provider string) error {
func veleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNamespace string, provider string) error {
for _, plugin := range getProviderPlugins(provider) {
stdoutBuf := new(bytes.Buffer)
stderrBuf := new(bytes.Buffer)
@@ -421,10 +427,8 @@ func VeleroAddPluginsForProvider(ctx context.Context, veleroCLI string, veleroNa
return nil
}
/*
Waits for uploads started by the Velero Plug-in for vSphere to complete
TODO - remove after upload progress monitoring is implemented
*/
// waitForVSphereUploadCompletion waits for uploads started by the Velero Plug-in for vSphere to complete
// TODO - remove after upload progress monitoring is implemented
func waitForVSphereUploadCompletion(ctx context.Context, timeout time.Duration, namespace string) error {
err := wait.PollImmediate(time.Minute, timeout, func() (bool, error) {
checkSnapshotCmd := exec.CommandContext(ctx, "kubectl",