Merge pull request #801 from ljakimczuk/master

Claim ownership of new AWS volumes by the Kubernetes cluster restoring the backup
Carlisia
2018-09-10 14:03:06 -07:00
committed by GitHub
4 changed files with 143 additions and 3 deletions


@@ -151,6 +151,31 @@ Specify the following values in the example files:
* Replace `<YOUR_STORAGE_CLASS_NAME>` with `gp2`. This is AWS's default `StorageClass` name.
* (Optional) If you have multiple clusters and you want to support migration of resources between them, in file `examples/aws/10-deployment.yaml`:
* Uncomment the environment variable `AWS_CLUSTER_NAME` and replace `<YOUR_CLUSTER_NAME>` with the current cluster's name. When restoring a backup, this lets Ark (and the cluster it's running on) claim ownership of AWS volumes created from snapshots taken on a different cluster.
The best way to get the current cluster's name is either to check it with the deployment tool you used or to read it directly from the EC2 instances' tags.
The following listing shows how to get the cluster's nodes' EC2 tags. First, get the nodes' external IDs (EC2 IDs):
```bash
kubectl get nodes -o jsonpath='{.items[*].spec.externalID}'
```
Copy one of the returned IDs `<ID>` and use it with the `aws` CLI tool to search for one of the following (a combined sketch follows this list):
* The `kubernetes.io/cluster/<AWS_CLUSTER_NAME>` tag with the value `owned`. Here, `<AWS_CLUSTER_NAME>` is your cluster's name:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=value,Values=owned"
```
* If the first command returns nothing, check for the legacy tag `KubernetesCluster`, whose value is `<AWS_CLUSTER_NAME>`:
```bash
aws ec2 describe-tags --filters "Name=resource-id,Values=<ID>" "Name=key,Values=KubernetesCluster"
```
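For convenience, both lookups can be combined into a short shell sketch (illustrative only; it reads the first node's EC2 ID into a throwaway `ID` variable and then runs the two queries above):
```bash
# Get the EC2 instance ID of the first node in the cluster
ID=$(kubectl get nodes -o jsonpath='{.items[0].spec.externalID}')

# Look for the owned-cluster tag first
aws ec2 describe-tags --filters "Name=resource-id,Values=$ID" "Name=value,Values=owned"

# If nothing is returned, fall back to the legacy KubernetesCluster tag
aws ec2 describe-tags --filters "Name=resource-id,Values=$ID" "Name=key,Values=KubernetesCluster"
```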
## Start the server
In the root of your Ark directory, run:


@@ -50,6 +50,8 @@ spec:
value: /credentials/cloud
- name: ARK_SCRATCH_DIR
value: /scratch
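# Optional: uncomment to let this cluster claim ownership of volumes restored from snapshots taken on a different cluster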
#- name: AWS_CLUSTER_NAME
# value: <YOUR_CLUSTER_NAME>
volumes:
- name: cloud-credentials
secret:


@@ -17,7 +17,9 @@ limitations under the License.
package aws
import (
"os"
"regexp"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
@@ -95,6 +97,8 @@ func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ s
return "", errors.Errorf("expected 1 snapshot from DescribeSnapshots for %s, got %v", snapshotID, count)
}
// Filter the snapshot's tags through getTagsForCluster() so that the
// proper ownership tags are applied to the restored volume.
req := &ec2.CreateVolumeInput{
SnapshotId: &snapshotID,
AvailabilityZone: &volumeAZ,
@@ -102,7 +106,7 @@ func (b *blockStore) CreateVolumeFromSnapshot(snapshotID, volumeType, volumeAZ s
TagSpecifications: []*ec2.TagSpecification{
{
ResourceType: aws.String(ec2.ResourceTypeVolume),
Tags: snapRes.Snapshots[0].Tags,
Tags: getTagsForCluster(snapRes.Snapshots[0].Tags),
},
},
}
@@ -180,6 +184,29 @@ func (b *blockStore) CreateSnapshot(volumeID, volumeAZ string, tags map[string]s
return *res.SnapshotId, nil
}
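// getTagsForCluster returns the tags to apply to a restored volume. If the
// AWS_CLUSTER_NAME environment variable is set, ownership tags for the current
// cluster are added and any ownership tags copied from the snapshot are dropped.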
func getTagsForCluster(snapshotTags []*ec2.Tag) []*ec2.Tag {
var result []*ec2.Tag
clusterName, haveAWSClusterNameEnvVar := os.LookupEnv("AWS_CLUSTER_NAME")
if haveAWSClusterNameEnvVar {
result = append(result, ec2Tag("kubernetes.io/cluster/"+clusterName, "owned"))
result = append(result, ec2Tag("KubernetesCluster", clusterName))
}
for _, tag := range snapshotTags {
if haveAWSClusterNameEnvVar && (strings.HasPrefix(*tag.Key, "kubernetes.io/cluster/") || *tag.Key == "KubernetesCluster") {
// If the AWS_CLUSTER_NAME variable is set, the current cluster's
// ownership tags should overwrite the old ownership tags on the volume.
continue
}
result = append(result, ec2Tag(*tag.Key, *tag.Value))
}
return result
}
func getTags(arkTags map[string]string, volumeTags []*ec2.Tag) []*ec2.Tag {
var result []*ec2.Tag


@@ -17,6 +17,7 @@ limitations under the License.
package aws
import (
"os"
"sort"
"testing"
@@ -91,6 +92,91 @@ func TestSetVolumeID(t *testing.T) {
assert.Equal(t, "vol-updated", actual)
}
func TestGetTagsForCluster(t *testing.T) {
tests := []struct {
name string
isNameSet bool
snapshotTags []*ec2.Tag
expected []*ec2.Tag
}{
{
name: "degenerate case (no tags)",
isNameSet: false,
snapshotTags: nil,
expected: nil,
},
{
name: "cluster tags exist and remain set",
isNameSet: false,
snapshotTags: []*ec2.Tag{
ec2Tag("KubernetesCluster", "old-cluster"),
ec2Tag("kubernetes.io/cluster/old-cluster", "owned"),
ec2Tag("aws-key", "aws-val"),
},
expected: []*ec2.Tag{
ec2Tag("KubernetesCluster", "old-cluster"),
ec2Tag("kubernetes.io/cluster/old-cluster", "owned"),
ec2Tag("aws-key", "aws-val"),
},
},
{
name: "cluster tags only get applied",
isNameSet: true,
snapshotTags: nil,
expected: []*ec2.Tag{
ec2Tag("KubernetesCluster", "current-cluster"),
ec2Tag("kubernetes.io/cluster/current-cluster", "owned"),
},
},
{
name:         "non-overlapping cluster and snapshot tags both get applied",
isNameSet: true,
snapshotTags: []*ec2.Tag{ec2Tag("aws-key", "aws-val")},
expected: []*ec2.Tag{
ec2Tag("KubernetesCluster", "current-cluster"),
ec2Tag("kubernetes.io/cluster/current-cluster", "owned"),
ec2Tag("aws-key", "aws-val"),
},
},
{
name:         "overlapping cluster tags, current cluster tags take precedence",
isNameSet: true,
snapshotTags: []*ec2.Tag{
ec2Tag("KubernetesCluster", "old-name"),
ec2Tag("kubernetes.io/cluster/old-name", "owned"),
ec2Tag("aws-key", "aws-val"),
},
expected: []*ec2.Tag{
ec2Tag("KubernetesCluster", "current-cluster"),
ec2Tag("kubernetes.io/cluster/current-cluster", "owned"),
ec2Tag("aws-key", "aws-val"),
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
if test.isNameSet {
os.Setenv("AWS_CLUSTER_NAME", "current-cluster")
}
res := getTagsForCluster(test.snapshotTags)
sort.Slice(res, func(i, j int) bool {
return *res[i].Key < *res[j].Key
})
sort.Slice(test.expected, func(i, j int) bool {
return *test.expected[i].Key < *test.expected[j].Key
})
assert.Equal(t, test.expected, res)
if test.isNameSet {
os.Unsetenv("AWS_CLUSTER_NAME")
}
})
}
}
func TestGetTags(t *testing.T) {
tests := []struct {
name string