Compare commits
upgrade-op...v0.3.3 (14 commits)

| SHA1 |
|---|
| 88b697f072 |
| 1dabfb4ead |
| 410920823a |
| 3ffaeceaf4 |
| add9023b74 |
| af8eb9a147 |
| 1201dcf546 |
| 7bf25c897c |
| 27a57b1e51 |
| c03642fdb1 |
| d5b689e254 |
| ac6e2f29e4 |
| 769c8caa71 |
| bc8429bd6b |
@@ -1,6 +1,6 @@
node_modules/
dist/
target/
mcs
!mcs/
console
!console/
portal-ui/node_modules/
.github/workflows/go.yml (2 changes)
@@ -33,4 +33,4 @@ jobs:
run: |
make verifiers
make test
make mcs
make console
.gitignore (6 changes)
@@ -19,8 +19,8 @@ vendor/

# Ignore executables
target/
mcs
!mcs/
console
!console/

dist/
@@ -31,4 +31,4 @@ public.crt
# Ignore VsCode files
.vscode/
*.code-workspace
*~
*~
@@ -1,11 +1,18 @@
# This is an example goreleaser.yaml file with some sane defaults.
# Make sure to check the documentation at http://goreleaser.com
project_name: mcs
project_name: console

release:
name_template: "Version {{.Version}}"
github:
owner: minio
name: console

before:
hooks:
# you may remove this if you don't use vgo
- go mod tidy

builds:
-
goos:
@@ -18,12 +25,12 @@ builds:
- arm64
env:
- CGO_ENABLED=0
main: ./cmd/mcs/
main: ./cmd/console/
flags:
- -trimpath
- --tags=kqueue
ldflags:
- -s -w -X github.com/minio/mcs/pkg.ReleaseTag={{.Tag}} -X github.com/minio/mcs/pkg.CommitID={{.FullCommit}} -X github.com/minio/mcs/pkg.Version={{.Version}} -X github.com/minio/mcs/pkg.ShortCommitID={{.ShortCommit}} -X github.com/minio/mcs/pkg.ReleaseTime={{.Date}}
- -s -w -X github.com/minio/console/pkg.ReleaseTag={{.Tag}} -X github.com/minio/console/pkg.CommitID={{.FullCommit}} -X github.com/minio/console/pkg.Version={{.Version}} -X github.com/minio/console/pkg.ShortCommitID={{.ShortCommit}} -X github.com/minio/console/pkg.ReleaseTime={{.Date}}
archives:
-
replacements:
@@ -51,7 +58,7 @@ changelog:
nfpms:
-
vendor: MinIO Inc.
homepage: https://github.com/minio/mcs
homepage: https://github.com/minio/console
maintainer: MinIO <minio@minio.io>
description: MinIO Console Server
license: GNU Affero General Public License v3.0
@@ -71,5 +78,5 @@ dockers:
goarch: amd64
dockerfile: Dockerfile.release
image_templates:
- "minio/mcs:{{ .Tag }}"
- "minio/mcs:latest"
- "minio/console:{{ .Tag }}"
- "minio/console:latest"
@@ -20,7 +20,7 @@ make swagger-gen

This will update all the necessary code.

`./restapi/configure_mcs.go` is a file that contains the handlers to be used by the application; this is the only place where we need to update our code to support the new APIs. This file is not affected when running the swagger generator, so it is safe to edit.
`./restapi/configure_console.go` is a file that contains the handlers to be used by the application; this is the only place where we need to update our code to support the new APIs. This file is not affected when running the swagger generator, so it is safe to edit.

## Unit Tests
`./restapi/handlers_test.go` needs to be updated with the proper tests for the new API.
@@ -47,7 +47,7 @@ $ git push origin my-new-feature
Pull requests can be created via GitHub. Refer to [this document](https://help.github.com/articles/creating-a-pull-request/) for detailed steps on how to create a pull request. After a Pull Request gets peer reviewed and approved, it will be merged.

## FAQs
### How does ``mcs`` manage dependencies?
### How does ``console`` manage dependencies?
``MinIO`` uses `go mod` to manage its dependencies.
- Run `go get foo/bar` in the source folder to add the dependency to the `go.mod` file.

@@ -55,5 +55,5 @@ To remove a dependency
- Edit your code and remove the import reference.
- Run `go mod tidy` in the source folder to remove the dependency from the `go.mod` file.

### What are the coding guidelines for mcs?
``mcs`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).
### What are the coding guidelines for console?
``console`` is fully conformant with Golang style. Refer: [Effective Go](https://github.com/golang/go/wiki/CodeReviewComments) article from Golang project. If you observe offending code, please feel free to send a pull request or ping us on [Slack](https://slack.min.io).
@@ -1,4 +1,4 @@
# LDAP authentication with MCS
# LDAP authentication with Console

## Setup

@@ -32,13 +32,13 @@ dn: ou=groups,dc=example,dc=org
objectclass:organizationalunit
ou: groups
description: generic groups branch
# create mcsAdmin group (this already exists on minio and has a policy of s3::*)
dn: cn=mcsAdmin,ou=groups,dc=example,dc=org
# create consoleAdmin group (this already exists on minio and has a policy of s3::*)
dn: cn=consoleAdmin,ou=groups,dc=example,dc=org
objectClass: top
objectClass: posixGroup
gidNumber: 678
# Assign group to new user
dn: cn=mcsAdmin,ou=groups,dc=example,dc=org
dn: cn=consoleAdmin,ou=groups,dc=example,dc=org
changetype: modify
add: memberuid
memberuid: billy
@@ -48,7 +48,7 @@ $ docker cp billy.ldif my-openldap-container:/container/service/slapd/assets/tes
$ docker exec my-openldap-container ldapadd -x -D "cn=admin,dc=example,dc=org" -w admin -f /container/service/slapd/assets/test/billy.ldif -H ldap://localhost -ZZ
```

Query the ldap server to check that the user billy was created correctly and got assigned to the mcsAdmin group; you should get a list
Query the ldap server to check that the user billy was created correctly and got assigned to the consoleAdmin group; you should get a list
containing ldap users and groups.

```
@@ -73,9 +73,9 @@ Re-enter new password:
Enter LDAP Password:
```

### Add the mcsAdmin policy to user billy on MinIO
### Add the consoleAdmin policy to user billy on MinIO
```
$ cat > mcsAdmin.json << EOF
$ cat > consoleAdmin.json << EOF
{
"Version": "2012-10-17",
"Statement": [
@@ -99,8 +99,8 @@ $ cat > mcsAdmin.json << EOF
]
}
EOF
$ mc admin policy add myminio mcsAdmin mcsAdmin.json
$ mc admin policy set myminio mcsAdmin user=billy
$ mc admin policy add myminio consoleAdmin consoleAdmin.json
$ mc admin policy set myminio consoleAdmin user=billy
```

## Run MinIO
@@ -116,12 +116,12 @@ export MINIO_IDENTITY_LDAP_SERVER_INSECURE=on
./minio server ~/Data
```

## Run MCS
## Run Console

```
export MCS_ACCESS_KEY=minio
export MCS_SECRET_KEY=minio123
export CONSOLE_ACCESS_KEY=minio
export CONSOLE_SECRET_KEY=minio123
...
export MCS_LDAP_ENABLED=on
./mcs server
export CONSOLE_LDAP_ENABLED=on
./console server
```
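If you want to script the verification step above instead of querying the server by hand, the sketch below shows one way to do it in Go. It assumes the `github.com/go-ldap/ldap/v3` client (not necessarily the LDAP package pinned in this repository) and the example DNs and credentials used throughout this guide.

```go
package main

import (
	"fmt"
	"log"

	"github.com/go-ldap/ldap/v3" // assumed LDAP client library for this sketch
)

func main() {
	// Connect to the example OpenLDAP container used in this guide.
	conn, err := ldap.DialURL("ldap://localhost:389")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Bind with the same admin DN/password used by the ldapadd command above.
	if err := conn.Bind("cn=admin,dc=example,dc=org", "admin"); err != nil {
		log.Fatal(err)
	}

	// Look for the billy user and for any group that lists billy as a member.
	req := ldap.NewSearchRequest(
		"dc=example,dc=org",
		ldap.ScopeWholeSubtree, ldap.NeverDerefAliases, 0, 0, false,
		"(|(uid=billy)(&(objectClass=posixGroup)(memberUid=billy)))",
		[]string{"dn", "cn", "memberUid"},
		nil,
	)
	res, err := conn.Search(req)
	if err != nil {
		log.Fatal(err)
	}
	for _, entry := range res.Entries {
		// Expect the billy entry and cn=consoleAdmin,ou=groups,dc=example,dc=org.
		fmt.Println(entry.DN)
	}
}
```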
Dockerfile (16 changes)
@@ -2,25 +2,25 @@ FROM golang:1.13

RUN apt-get update -y && apt-get install -y ca-certificates

ADD go.mod /go/src/github.com/minio/mcs/go.mod
ADD go.sum /go/src/github.com/minio/mcs/go.sum
WORKDIR /go/src/github.com/minio/mcs/
ADD go.mod /go/src/github.com/minio/console/go.mod
ADD go.sum /go/src/github.com/minio/console/go.sum
WORKDIR /go/src/github.com/minio/console/

# Get dependencies - will also be cached if we won't change mod/sum
RUN go mod download

ADD . /go/src/github.com/minio/mcs/
WORKDIR /go/src/github.com/minio/mcs/
ADD . /go/src/github.com/minio/console/
WORKDIR /go/src/github.com/minio/console/

ENV CGO_ENABLED=0

RUN go build -ldflags "-w -s" -a -o mcs ./cmd/mcs
RUN go build -ldflags "-w -s" -a -o console ./cmd/console

FROM scratch
MAINTAINER MinIO Development "dev@min.io"
EXPOSE 9090

COPY --from=0 /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=0 /go/src/github.com/minio/mcs/mcs .
COPY --from=0 /go/src/github.com/minio/console/console .

CMD ["/mcs"]
ENTRYPOINT ["/console"]
@@ -1,6 +1,12 @@
FROM ubuntu:18.04 as certs

RUN apt-get update -y && apt-get install -y ca-certificates

FROM scratch
MAINTAINER MinIO Development "dev@min.io"
EXPOSE 9090
COPY mcs /mcs
COPY console /console

ENTRYPOINT ["/mcs"]
COPY --from=certs /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/

ENTRYPOINT ["/console"]
Makefile (32 changes)
@@ -3,19 +3,19 @@ GOPATH := $(shell go env GOPATH)
# Sets the build version based on the output of the following command, if we are building for a tag, that's the build else it uses the current git branch as the build
BUILD_VERSION:=$(shell git describe --exact-match --tags $(git log -n1 --pretty='%h') 2>/dev/null || git rev-parse --abbrev-ref HEAD 2>/dev/null)
BUILD_TIME:=$(shell date 2>/dev/null)
TAG ?= "minio/m3:$(VERSION)-dev"
TAG ?= "minio/console:$(VERSION)-dev"

default: mcs
default: console

.PHONY: mcs
mcs:
@echo "Building mcs binary to './mcs'"
@(GO111MODULE=on CGO_ENABLED=0 go build -trimpath --tags=kqueue --ldflags "-s -w" -o mcs ./cmd/mcs)
.PHONY: console
console:
@echo "Building Console binary to './console'"
@(GO111MODULE=on CGO_ENABLED=0 go build -trimpath --tags=kqueue --ldflags "-s -w" -o console ./cmd/console)

k8sdev:
@docker build -t $(TAG) --build-arg build_version=$(BUILD_VERSION) --build-arg build_time='$(BUILD_TIME)' .
@kind load docker-image $(TAG)
@echo "Done, now restart your mcs deployment"
@echo "Done, now restart your console deployment"

getdeps:
@mkdir -p ${GOPATH}/bin
@@ -33,32 +33,32 @@ lint:
@GO111MODULE=on ${GOPATH}/bin/golangci-lint cache clean
@GO111MODULE=on ${GOPATH}/bin/golangci-lint run --timeout=5m --config ./.golangci.yml

install: mcs
@echo "Installing mcs binary to '$(GOPATH)/bin/mcs'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/mcs $(GOPATH)/bin/mcs
@echo "Installation successful. To learn more, try \"mcs --help\"."
install: console
@echo "Installing console binary to '$(GOPATH)/bin/console'"
@mkdir -p $(GOPATH)/bin && cp -f $(PWD)/console $(GOPATH)/bin/console
@echo "Installation successful. To learn more, try \"console --help\"."

swagger-gen:
@echo "Generating swagger server code from yaml"
@rm -rf models
@rm -rf restapi/operations
@swagger generate server -A mcs --main-package=mcs --exclude-main -P models.Principal -f ./swagger.yml -r NOTICE
@swagger generate server -A console --main-package=console --exclude-main -P models.Principal -f ./swagger.yml -r NOTICE

assets:
@(cd portal-ui; yarn install; make build-static; cd ..)

test:
@(GO111MODULE=on go test -race -v github.com/minio/mcs/restapi/...)
@(GO111MODULE=on go test -race -v github.com/minio/mcs/pkg/...)
@(GO111MODULE=on go test -race -v github.com/minio/console/restapi/...)
@(GO111MODULE=on go test -race -v github.com/minio/console/pkg/...)

coverage:
@(GO111MODULE=on go test -v -coverprofile=coverage.out github.com/minio/mcs/restapi/... && go tool cover -html=coverage.out && open coverage.html)
@(GO111MODULE=on go test -v -coverprofile=coverage.out github.com/minio/console/restapi/... && go tool cover -html=coverage.out && open coverage.html)

clean:
@echo "Cleaning up all the generated files"
@find . -name '*.test' | xargs rm -fv
@find . -name '*~' | xargs rm -fv
@rm -vf mcs
@rm -vf console

docker:
@docker build -t $(TAG) --build-arg build_version=$(BUILD_VERSION) --build-arg build_time='$(BUILD_TIME)' .
README.md (46 changes)
@@ -9,20 +9,20 @@ A graphical user interface for [MinIO](https://github.com/minio/minio)

## Setup

All `mcs` needs is a MinIO user with admin privileges and a URL pointing to your MinIO deployment.
All `console` needs is a MinIO user with admin privileges and a URL pointing to your MinIO deployment.
> Note: We don't recommend using MinIO's Operator Credentials

1. Create a user for `mcs` using `mc`.
1. Create a user for `console` using `mc`.
```
$ set +o history
$ mc admin user add myminio mcs YOURMCSSECRET
$ mc admin user add myminio console YOURCONSOLESECRET
$ set -o history
```

2. Create a policy for `mcs` with access to everything (for testing and debugging)
2. Create a policy for `console` with access to everything (for testing and debugging)

```
$ cat > mcsAdmin.json << EOF
$ cat > consoleAdmin.json << EOF
{
"Version": "2012-10-17",
"Statement": [{
@@ -45,18 +45,18 @@ $ cat > mcsAdmin.json << EOF
]
}
EOF
$ mc admin policy add myminio mcsAdmin mcsAdmin.json
$ mc admin policy add myminio consoleAdmin consoleAdmin.json
```

3. Set the policy for the new `mcs` user
3. Set the policy for the new `console` user

```
$ mc admin policy set myminio mcsAdmin user=mcs
$ mc admin policy set myminio consoleAdmin user=console
```


### Note
Additionally, you can create policies to limit the privileges for `mcs` users; for example, if you want the user to only have access to the dashboard, buckets, notifications and watch page, the policy should look like this:
Additionally, you can create policies to limit the privileges for `console` users; for example, if you want the user to only have access to the dashboard, buckets, notifications and watch page, the policy should look like this:
```
{
"Version": "2012-10-17",
@@ -97,34 +97,34 @@ Additionally, you can create policies to limit the privileges for `mcs` users, f
}
```

## Run MCS server
## Run Console server
To run the server:

```
export MCS_HMAC_JWT_SECRET=YOURJWTSIGNINGSECRET
export CONSOLE_HMAC_JWT_SECRET=YOURJWTSIGNINGSECRET

# required to encrypt the jwt payload
export MCS_PBKDF_PASSPHRASE=SECRET
export CONSOLE_PBKDF_PASSPHRASE=SECRET

# required to encrypt the jwt payload
export MCS_PBKDF_SALT=SECRET
export CONSOLE_PBKDF_SALT=SECRET

export MCS_ACCESS_KEY=mcs
export MCS_SECRET_KEY=YOURMCSSECRET
export MCS_MINIO_SERVER=http://localhost:9000
./mcs server
export CONSOLE_ACCESS_KEY=console
export CONSOLE_SECRET_KEY=YOURCONSOLESECRET
export CONSOLE_MINIO_SERVER=http://localhost:9000
./console server
```

## Connect MCS to MinIO using TLS and a self-signed certificate
## Connect Console to MinIO using TLS and a self-signed certificate

```
...
export MCS_MINIO_SERVER_TLS_ROOT_CAS=<certificate_file_name>
export MCS_MINIO_SERVER=https://localhost:9000
./mcs server
export CONSOLE_MINIO_SERVER_TLS_ROOT_CAS=<certificate_file_name>
export CONSOLE_MINIO_SERVER=https://localhost:9000
./console server
```

You can verify that the APIs work by making a request to `localhost:9090/api/v1/...`
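For a quick programmatic check that the server is up, a minimal Go sketch follows; the `/api/v1/session` path used here is only a stand-in, and the real endpoint names come from `swagger.yml`.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	// The console server started above listens on port 9090 by default.
	// Replace "/api/v1/session" with whichever endpoint you want to probe;
	// consult swagger.yml for the actual paths.
	resp, err := http.Get("http://localhost:9090/api/v1/session")
	if err != nil {
		log.Fatalf("console server not reachable: %v", err)
	}
	defer resp.Body.Close()

	body, _ := ioutil.ReadAll(resp.Body)
	// Any HTTP response (even a 401 for an unauthenticated request) confirms the API is serving.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}
```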
# Contribute to mcs Project
Please follow mcs [Contributor's Guide](https://github.com/minio/mcs/blob/master/CONTRIBUTING.md)
# Contribute to console Project
Please follow console [Contributor's Guide](https://github.com/minio/console/blob/master/CONTRIBUTING.md)
@@ -2,12 +2,12 @@

## Supported Versions

We always provide security updates for the [latest release](https://github.com/minio/mcs/releases/latest).
We always provide security updates for the [latest release](https://github.com/minio/console/releases/latest).
Whenever there is a security update you just need to upgrade to the latest version.

## Reporting a Vulnerability

All security bugs in [minio/mcs](https://github.com/minio/mcs) (or other minio/* repositories)
All security bugs in [minio/console](https://github.com/minio/console) (or other minio/* repositories)
should be reported by email to security@min.io. Your email will be acknowledged within 48 hours,
and you'll receive a more detailed response to your email within 72 hours indicating the next steps
in handling your report.
@@ -39,19 +39,19 @@ func GetK8sAPIServer() string {
|
||||
// if console is not running inside k8s by default will look for the k8s api server on localhost:8001 (kubectl proxy)
|
||||
// NOTE: using kubectl proxy is for local development only, since every request send to localhost:8001 will bypass service account authentication
|
||||
// more info here: https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api
|
||||
// you can override this using MCS_K8S_API_SERVER, ie use the k8s cluster from `kubectl config view`
|
||||
// you can override this using CONSOLE_K8S_API_SERVER, ie use the k8s cluster from `kubectl config view`
|
||||
host, port := env.Get("KUBERNETES_SERVICE_HOST", ""), env.Get("KUBERNETES_SERVICE_PORT", "")
|
||||
apiServerAddress := "http://localhost:8001"
|
||||
if host != "" && port != "" {
|
||||
apiServerAddress = "https://" + net.JoinHostPort(host, port)
|
||||
}
|
||||
return env.Get(McsK8sAPIServer, apiServerAddress)
|
||||
return env.Get(ConsoleK8sAPIServer, apiServerAddress)
|
||||
}
|
||||
|
||||
// If MCS_K8S_API_SERVER_TLS_ROOT_CA is true mcs will load the certificate into the
|
||||
// If CONSOLE_K8S_API_SERVER_TLS_ROOT_CA is true console will load the certificate into the
|
||||
// http.client rootCAs pool, this is useful for testing an k8s ApiServer or when working with self-signed certificates
|
||||
func getK8sAPIServerTLSRootCA() string {
|
||||
return strings.TrimSpace(env.Get(McsK8SAPIServerTLSRootCA, ""))
|
||||
return strings.TrimSpace(env.Get(ConsoleK8SAPIServerTLSRootCA, ""))
|
||||
}
|
||||
|
||||
// GetNsFromFile assumes console is running inside a k8s pod and extract the current namespace from the
|
||||
@@ -69,7 +69,7 @@ var namespace = GetNsFromFile()
|
||||
|
||||
// Returns the namespace in which the controller is installed
|
||||
func GetNs() string {
|
||||
return env.Get(McsNamespace, namespace)
|
||||
return env.Get(ConsoleNamespace, namespace)
|
||||
}
|
||||
|
||||
// getLatestMinIOImage returns the latest docker image for MinIO if found on the internet
|
||||
@@ -106,7 +106,7 @@ var latestMinIOImage, errLatestMinIOImage = getLatestMinIOImage(
|
||||
// a preferred image to be used (configured via ENVIRONMENT VARIABLES) GetMinioImage will return that
|
||||
// if not, GetMinioImage will try to obtain the image URL for the latest version of MinIO and return that
|
||||
func GetMinioImage() (*string, error) {
|
||||
image := strings.TrimSpace(env.Get(McsMinioImage, ""))
|
||||
image := strings.TrimSpace(env.Get(ConsoleMinioImage, ""))
|
||||
// if there is a preferred image configured by the user we'll always return that
|
||||
if image != "" {
|
||||
return &image, nil
|
||||
@@ -156,7 +156,7 @@ func getLatestMCImage() (*string, error) {
|
||||
var latestMCImage, errLatestMCImage = getLatestMCImage()
|
||||
|
||||
func GetMCImage() (*string, error) {
|
||||
image := strings.TrimSpace(env.Get(McsMCImage, ""))
|
||||
image := strings.TrimSpace(env.Get(ConsoleMCImage, ""))
|
||||
// if there is a preferred image configured by the user we'll always return that
|
||||
if image != "" {
|
||||
return &image, nil
|
||||
|
||||
@@ -17,9 +17,9 @@
|
||||
package cluster
|
||||
|
||||
const (
|
||||
McsK8sAPIServer = "MCS_K8S_API_SERVER"
|
||||
McsK8SAPIServerTLSRootCA = "MCS_K8S_API_SERVER_TLS_ROOT_CA"
|
||||
McsMinioImage = "MCS_MINIO_IMAGE"
|
||||
McsMCImage = "MCS_MC_IMAGE"
|
||||
McsNamespace = "MCS_NAMESPACE"
|
||||
ConsoleK8sAPIServer = "CONSOLE_K8S_API_SERVER"
|
||||
ConsoleK8SAPIServerTLSRootCA = "CONSOLE_K8S_API_SERVER_TLS_ROOT_CA"
|
||||
ConsoleMinioImage = "CONSOLE_MINIO_IMAGE"
|
||||
ConsoleMCImage = "CONSOLE_MC_IMAGE"
|
||||
ConsoleNamespace = "CONSOLE_NAMESPACE"
|
||||
)
|
||||
|
||||
@@ -23,7 +23,7 @@ import (
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/minio/mcs/pkg"
|
||||
"github.com/minio/console/pkg"
|
||||
|
||||
"github.com/minio/minio/pkg/console"
|
||||
"github.com/minio/minio/pkg/trie"
|
||||
@@ -32,8 +32,8 @@ import (
|
||||
"github.com/minio/cli"
|
||||
)
|
||||
|
||||
// Help template for mcs.
|
||||
var mcsHelpTemplate = `NAME:
|
||||
// Help template for Console.
|
||||
var consoleHelpTemplate = `NAME:
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
DESCRIPTION:
|
||||
@@ -57,10 +57,10 @@ var appCmds = []cli.Command{
|
||||
}
|
||||
|
||||
func newApp(name string) *cli.App {
|
||||
// Collection of mcs commands currently supported are.
|
||||
// Collection of console commands currently supported are.
|
||||
var commands []cli.Command
|
||||
|
||||
// Collection of mcs commands currently supported in a trie tree.
|
||||
// Collection of console commands currently supported in a trie tree.
|
||||
commandsTree := trie.NewTrie()
|
||||
|
||||
// registerCommand registers a cli command.
|
||||
@@ -112,9 +112,9 @@ func newApp(name string) *cli.App {
|
||||
app.Compiled, _ = time.Parse(time.RFC3339, pkg.ReleaseTime)
|
||||
app.Commands = commands
|
||||
app.HideHelpCommand = true // Hide `help, h` command, we already have `minio --help`.
|
||||
app.CustomAppHelpTemplate = mcsHelpTemplate
|
||||
app.CustomAppHelpTemplate = consoleHelpTemplate
|
||||
app.CommandNotFound = func(ctx *cli.Context, command string) {
|
||||
console.Printf("‘%s’ is not a mcs sub-command. See ‘mcs --help’.\n", command)
|
||||
console.Printf("‘%s’ is not a console sub-command. See ‘console --help’.\n", command)
|
||||
closestCommands := findClosestCommands(command)
|
||||
if len(closestCommands) > 0 {
|
||||
console.Println()
|
||||
@@ -24,15 +24,15 @@ import (
|
||||
"github.com/go-openapi/loads"
|
||||
"github.com/jessevdk/go-flags"
|
||||
"github.com/minio/cli"
|
||||
"github.com/minio/mcs/restapi"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/console/restapi"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
)
|
||||
|
||||
// starts the server
|
||||
var serverCmd = cli.Command{
|
||||
Name: "server",
|
||||
Aliases: []string{"srv"},
|
||||
Usage: "starts mcs server",
|
||||
Usage: "starts Console server",
|
||||
Action: startServer,
|
||||
Flags: []cli.Flag{
|
||||
cli.StringFlag{
|
||||
@@ -75,7 +75,7 @@ func startServer(ctx *cli.Context) error {
|
||||
log.Fatalln(err)
|
||||
}
|
||||
|
||||
api := operations.NewMcsAPI(swaggerSpec)
|
||||
api := operations.NewConsoleAPI(swaggerSpec)
|
||||
server := restapi.NewServer(api)
|
||||
defer server.Shutdown()
|
||||
|
||||
@@ -112,7 +112,7 @@ func startServer(ctx *cli.Context) error {
|
||||
if tlsCertificatePath != "" && tlsCertificateKeyPath != "" {
|
||||
server.TLSCertificate = flags.Filename(tlsCertificatePath)
|
||||
server.TLSCertificateKey = flags.Filename(tlsCertificateKeyPath)
|
||||
// If TLS certificates are provided enforce the HTTPS schema, meaning mcs will redirect
|
||||
// If TLS certificates are provided enforce the HTTPS schema, meaning console will redirect
|
||||
// plain HTTP connections to HTTPS server
|
||||
server.EnabledListeners = []string{"http", "https"}
|
||||
server.TLSPort = ctx.Int("tls-port")
|
||||
docs/console_operator_mode.md (new file, 39 lines)
@@ -0,0 +1,39 @@
# Running Console in Operator mode

`Console` will authenticate against `Kubernetes` using bearer tokens sent via the HTTP `Authorization` header. The user provides this token once
in the login form; Console validates it against Kubernetes (list apis) and, if it is valid, generates and returns a new Console session
with encrypted claims (the user's Service account token is carried inside the JWT in the data field)
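As an aside, the snippet below sketches the kind of check described in the paragraph above: hand the user-supplied bearer token to client-go and issue a cheap list call; if the Kubernetes API accepts it, the token belongs to a usable service account. It is only an illustration under those assumptions, not the login handler from this changeset, and the client-go calls shown (`rest.Config`, `kubernetes.NewForConfig`, a `List` with context) may differ slightly from the versions pinned in `go.mod`.

```go
package main

import (
	"context"
	"log"
	"os"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// validateToken performs a cheap "list" call with the user-supplied bearer token.
// If the API server accepts the call, the token is usable for further requests.
func validateToken(apiServer, token string) error {
	cfg := &rest.Config{
		Host:        apiServer, // e.g. http://localhost:8001 when using `kubectl proxy`
		BearerToken: token,
	}
	clientset, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		return err
	}
	_, err = clientset.CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{Limit: 1})
	return err
}

func main() {
	// Token pasted into the login form; read from an env var for this example.
	if err := validateToken("http://localhost:8001", os.Getenv("SA_TOKEN")); err != nil {
		log.Fatalf("token rejected: %v", err)
	}
	log.Println("token accepted by the Kubernetes API")
}
```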
# Kubernetes

The provided `JWT token` corresponds to the `Kubernetes service account` that `Console` will use to run tasks on behalf of the
user, ie: list, create, edit, delete tenants, storage class, etc.


# Development

If console is running inside a k8s pod, `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` will contain the k8s api server address;
if console is not running inside k8s, it will by default look for the k8s api server on `localhost:8001` (kubectl proxy)

If you are running console in your local environment and wish to make requests to `Kubernetes`, you can set `CONSOLE_K8S_API_SERVER`; if
the environment variable is not present, `Console` will default to `"http://localhost:8001"`. Additionally, you will need to set the
`CONSOLE_OPERATOR_MODE=on` variable to make Console display the Operator UI.

NOTE: using `kubectl` proxy is for local development only, since every request sent to localhost:8001 will bypass service account authentication
more info here: https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api
you can override this using `CONSOLE_K8S_API_SERVER`, ie use the k8s cluster from `kubectl config view`

## Extract the Service account token and use it with Console

For local development you can use the jwt associated with the `console-sa` service account; you can get the token by running
the following command in your terminal:

```
kubectl get secret $(kubectl get serviceaccount console-sa -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
```

Then run the Console server

```
CONSOLE_OPERATOR_MODE=on ./console server
```
@@ -1,39 +0,0 @@
# Running MCS in Operator mode

`MCS` will authenticate against `Kubernetes` using bearer tokens sent via the HTTP `Authorization` header. The user provides this token once
in the login form; MCS validates it against Kubernetes (list apis) and, if it is valid, generates and returns a new MCS session
with encrypted claims (the user's Service account token is carried inside the JWT in the data field)

# Kubernetes

The provided `JWT token` corresponds to the `Kubernetes service account` that `MCS` will use to run tasks on behalf of the
user, ie: list, create, edit, delete tenants, storage class, etc.


# Development

If console is running inside a k8s pod, `KUBERNETES_SERVICE_HOST` and `KUBERNETES_SERVICE_PORT` will contain the k8s api server address;
if console is not running inside k8s, it will by default look for the k8s api server on `localhost:8001` (kubectl proxy)

If you are running mcs in your local environment and wish to make requests to `Kubernetes`, you can set `MCS_K8S_API_SERVER`; if
the environment variable is not present, `MCS` will default to `"http://localhost:8001"`. Additionally, you will need to set the
`MCS_OPERATOR_MODE=on` variable to make MCS display the Operator UI.

NOTE: using `kubectl` proxy is for local development only, since every request sent to localhost:8001 will bypass service account authentication
more info here: https://kubernetes.io/docs/tasks/access-application-cluster/access-cluster/#directly-accessing-the-rest-api
you can override this using `MCS_K8S_API_SERVER`, ie use the k8s cluster from `kubectl config view`

## Extract the Service account token and use it with MCS

For local development you can use the jwt associated with the `mcs-sa` service account; you can get the token by running
the following command in your terminal:

```
kubectl get secret $(kubectl get serviceaccount mcs-sa -o jsonpath="{.secrets[0].name}") -o jsonpath="{.data.token}" | base64 --decode
```

Then run the mcs server

```
MCS_OPERATOR_MODE=on ./mcs server
```
go.mod (4 changes)
@@ -1,4 +1,4 @@
module github.com/minio/mcs
module github.com/minio/console

go 1.13

@@ -20,7 +20,7 @@ require (
github.com/minio/mc v0.0.0-20200725183142-90d22b271f60
github.com/minio/minio v0.0.0-20200725154241-abbf6ce6ccf8
github.com/minio/minio-go/v7 v7.0.2-0.20200722162308-e0105ca08252
github.com/minio/operator v0.0.0-20200725185636-4a625e4fbb31
github.com/minio/operator v0.0.0-20200730044813-c2895a5065a1
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/satori/go.uuid v1.2.0
github.com/stretchr/testify v1.6.1
10
go.sum
10
go.sum
@@ -143,6 +143,8 @@ github.com/go-ldap/ldap v3.0.2+incompatible h1:kD5HQcAzlQ7yrhfn+h+MSABeAy/jAJhvI
|
||||
github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
|
||||
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
|
||||
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
|
||||
@@ -463,10 +465,8 @@ github.com/minio/minio v0.0.0-20200725154241-abbf6ce6ccf8/go.mod h1:NBWtYp4t5pt3
|
||||
github.com/minio/minio-go/v7 v7.0.1/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns=
|
||||
github.com/minio/minio-go/v7 v7.0.2-0.20200722162308-e0105ca08252 h1:V2JkMDoSmEIhRcMJwX3qeJVOzy1B5bHpHbZaQu77vbs=
|
||||
github.com/minio/minio-go/v7 v7.0.2-0.20200722162308-e0105ca08252/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns=
|
||||
github.com/minio/operator v0.0.0-20200725185636-4a625e4fbb31 h1:FzB4mwu62LnVgOFhB7fLu15fAgIh6+1PHJoytkQPEsY=
|
||||
github.com/minio/operator v0.0.0-20200725185636-4a625e4fbb31 h1:FzB4mwu62LnVgOFhB7fLu15fAgIh6+1PHJoytkQPEsY=
|
||||
github.com/minio/operator v0.0.0-20200725185636-4a625e4fbb31/go.mod h1:G0pMmQFV5b5OrH7/OmVKtPoHzj3SmHNgqDlTew1NM/Y=
|
||||
github.com/minio/operator v0.0.0-20200725185636-4a625e4fbb31/go.mod h1:G0pMmQFV5b5OrH7/OmVKtPoHzj3SmHNgqDlTew1NM/Y=
|
||||
github.com/minio/operator v0.0.0-20200730044813-c2895a5065a1 h1:cTgvRgFBUVxbnxhQUioT2T7SH0M7AyvO7dDX32yKPGw=
|
||||
github.com/minio/operator v0.0.0-20200730044813-c2895a5065a1/go.mod h1:RLhFkLcL65qmrgUQJHrRwb1Lb4yHgD/DfjNENY2WNXg=
|
||||
github.com/minio/selfupdate v0.3.0 h1:1qfaZscU3hWwX1cF5m5Dov8Z5aZNvPHk9LROzIkas1k=
|
||||
github.com/minio/selfupdate v0.3.0/go.mod h1:b8ThJzzH7u2MkF6PcIra7KaXO9Khf6alWPvMSyTDCFM=
|
||||
github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU=
|
||||
@@ -917,6 +917,8 @@ k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUc
|
||||
k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
|
||||
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
|
||||
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
|
||||
k8s.io/klog/v2 v2.3.0 h1:WmkrnW7fdrm0/DMClc+HIxtftvxVIPAhlVwMQo5yLco=
|
||||
k8s.io/klog/v2 v2.3.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c h1:/KUFqjjqAcY4Us6luF5RDNZ16KJtb49HfR3ZHB9qYXM=
|
||||
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
|
||||
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
|
||||
|
||||
@@ -25,7 +25,7 @@ go get -d k8s.io/code-generator/...
|
||||
# Checkout code-generator to compatible version
|
||||
#(cd $GOPATH/src/k8s.io/code-generator && git checkout origin/release-1.14 -B release-1.14)
|
||||
|
||||
REPOSITORY=github.com/minio/mcs
|
||||
REPOSITORY=github.com/minio/console
|
||||
$GOPATH/src/k8s.io/code-generator/generate-groups.sh all \
|
||||
$REPOSITORY/pkg/clientgen $REPOSITORY/pkg/apis networking.gke.io:v1beta2 \
|
||||
--go-header-file $SCRIPT_ROOT/hack/header.go.txt
|
||||
|
||||
@@ -1,12 +1,12 @@
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: mcs-sa-binding
|
||||
name: console-sa-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: mcs-sa-role
|
||||
name: console-sa-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: mcs-sa
|
||||
name: console-sa
|
||||
namespace: default
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: mcs-sa-role
|
||||
name: console-sa-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
7
k8s/console/base/console-configmap.yaml
Normal file
7
k8s/console/base/console-configmap.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: console-env
|
||||
data:
|
||||
CONSOLE_PORT: "9090"
|
||||
CONSOLE_TLS_PORT: "9443"
|
||||
@@ -1,24 +1,23 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mcs
|
||||
name: console
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mcs
|
||||
app: console
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mcs
|
||||
app: console
|
||||
spec:
|
||||
serviceAccountName: mcs-sa
|
||||
serviceAccountName: console-sa
|
||||
containers:
|
||||
- name: mcs
|
||||
image: minio/mcs:latest
|
||||
- name: console
|
||||
image: minio/console:latest
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
args:
|
||||
- /mcs
|
||||
- server
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
@@ -1,5 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: mcs-sa
|
||||
name: console-sa
|
||||
namespace: default
|
||||
@@ -1,9 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mcs
|
||||
name: console
|
||||
labels:
|
||||
name: mcs
|
||||
name: console
|
||||
spec:
|
||||
ports:
|
||||
- port: 9090
|
||||
@@ -11,4 +11,4 @@ spec:
|
||||
- port: 9443
|
||||
name: https
|
||||
selector:
|
||||
app: mcs
|
||||
app: console
|
||||
@@ -2,10 +2,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
# beginning of customizations
|
||||
resources:
|
||||
- mcs-service-account.yaml
|
||||
- mcs-cluster-role.yaml
|
||||
- mcs-cluster-role-binding.yaml
|
||||
- mcs-configmap.yaml
|
||||
- mcs-service.yaml
|
||||
- mcs-deployment.yaml
|
||||
- console-service-account.yaml
|
||||
- console-cluster-role.yaml
|
||||
- console-cluster-role-binding.yaml
|
||||
- console-configmap.yaml
|
||||
- console-service.yaml
|
||||
- console-deployment.yaml
|
||||
- minio-operator.yaml
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mcs-env
|
||||
data:
|
||||
MCS_PORT: "9090"
|
||||
MCS_TLS_PORT: "9443"
|
||||
File diff suppressed because it is too large
@@ -1,16 +1,16 @@
|
||||
#!/bin/bash
|
||||
|
||||
# setup environment variables based on flags to see if we should build the docker containers again
|
||||
MCS_DOCKER="true"
|
||||
CONSOLE_DOCKER="true"
|
||||
|
||||
# evaluate flags
|
||||
# `-m` for mcs
|
||||
# `-m` for console
|
||||
|
||||
|
||||
while getopts ":m:" opt; do
|
||||
case $opt in
|
||||
m)
|
||||
MCS_DOCKER="$OPTARG"
|
||||
CONSOLE_DOCKER="$OPTARG"
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
@@ -34,11 +34,11 @@ echo "install metrics server"
|
||||
kubectl apply -f metrics-dev.yaml
|
||||
|
||||
# Whether or not to build the m3 container and load it to kind or just load it
|
||||
if [[ $MCS_DOCKER == "true" ]]; then
|
||||
if [[ $CONSOLE_DOCKER == "true" ]]; then
|
||||
# Build mkube
|
||||
make --directory=".." k8sdev TAG=minio/mcs:latest
|
||||
make --directory=".." k8sdev TAG=minio/console:latest
|
||||
else
|
||||
kind load docker-image minio/mcs:latest
|
||||
kind load docker-image minio/console:latest
|
||||
fi
|
||||
|
||||
echo "done"
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Get's the latest deployment file from MinIO Operator
|
||||
curl https://raw.githubusercontent.com/minio/operator/master/minio-operator.yaml > operator-console/base/minio-operator.yaml
|
||||
@@ -1,12 +1,12 @@
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: mcs-sa-binding
|
||||
name: console-sa-binding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: mcs-sa-role
|
||||
name: console-sa-role
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: mcs-sa
|
||||
name: console-sa
|
||||
namespace: default
|
||||
@@ -1,7 +1,7 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: mcs-sa-role
|
||||
name: console-sa-role
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
7
k8s/operator-console/base/console-configmap.yaml
Normal file
7
k8s/operator-console/base/console-configmap.yaml
Normal file
@@ -0,0 +1,7 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: console-env
|
||||
data:
|
||||
CONSOLE_PORT: "9090"
|
||||
CONSOLE_TLS_PORT: "9443"
|
||||
@@ -1,27 +1,26 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: mcs
|
||||
name: console
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: mcs
|
||||
app: console
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: mcs
|
||||
app: console
|
||||
spec:
|
||||
serviceAccountName: mcs-sa
|
||||
serviceAccountName: console-sa
|
||||
containers:
|
||||
- name: mcs
|
||||
image: minio/mcs:latest
|
||||
- name: console
|
||||
image: minio/console:latest
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
env:
|
||||
- name: MCS_OPERATOR_MODE
|
||||
- name: CONSOLE_OPERATOR_MODE
|
||||
value: "on"
|
||||
args:
|
||||
- /mcs
|
||||
- server
|
||||
ports:
|
||||
- containerPort: 9090
|
||||
@@ -1,5 +1,5 @@
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: mcs-sa
|
||||
name: console-sa
|
||||
namespace: default
|
||||
@@ -1,9 +1,9 @@
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
name: mcs
|
||||
name: console
|
||||
labels:
|
||||
name: mcs
|
||||
name: console
|
||||
spec:
|
||||
ports:
|
||||
- port: 9090
|
||||
@@ -11,4 +11,4 @@ spec:
|
||||
- port: 9443
|
||||
name: https
|
||||
selector:
|
||||
app: mcs
|
||||
app: console
|
||||
@@ -2,10 +2,10 @@ apiVersion: kustomize.config.k8s.io/v1beta1
|
||||
kind: Kustomization
|
||||
# beginning of customizations
|
||||
resources:
|
||||
- mcs-service-account.yaml
|
||||
- mcs-cluster-role.yaml
|
||||
- mcs-cluster-role-binding.yaml
|
||||
- mcs-configmap.yaml
|
||||
- mcs-service.yaml
|
||||
- mcs-deployment.yaml
|
||||
- console-service-account.yaml
|
||||
- console-cluster-role.yaml
|
||||
- console-cluster-role-binding.yaml
|
||||
- console-configmap.yaml
|
||||
- console-service.yaml
|
||||
- console-deployment.yaml
|
||||
- minio-operator.yaml
|
||||
|
||||
@@ -1,7 +0,0 @@
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: mcs-env
|
||||
data:
|
||||
MCS_PORT: "9090"
|
||||
MCS_TLS_PORT: "9443"
|
||||
File diff suppressed because it is too large
@@ -26,8 +26,8 @@ CODEGEN_PKG=${CODEGEN_PKG:-$(cd "${SCRIPT_ROOT}"; ls -d -1 ./vendor/k8s.io/code-
|
||||
# k8s.io/kubernetes. The output-base is needed for the generators to output into the vendor dir
|
||||
# instead of the $GOPATH directly. For normal projects this can be dropped.
|
||||
bash "${CODEGEN_PKG}"/generate-groups.sh "all" \
|
||||
github.com/minio/mcs/pkg/generated \
|
||||
github.com/minio/mcs/pkg/apis \
|
||||
github.com/minio/console/pkg/generated \
|
||||
github.com/minio/console/pkg/apis \
|
||||
mkube:v1 \
|
||||
--go-header-file "${SCRIPT_ROOT}"/k8s/boilerplate.go.txt
|
||||
|
||||
|
||||
@@ -23,6 +23,7 @@ package models
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
@@ -35,12 +36,42 @@ type CreateTenantResponse struct {
|
||||
// access key
|
||||
AccessKey string `json:"access_key,omitempty"`
|
||||
|
||||
// console
|
||||
Console *CreateTenantResponseConsole `json:"console,omitempty"`
|
||||
|
||||
// secret key
|
||||
SecretKey string `json:"secret_key,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this create tenant response
|
||||
func (m *CreateTenantResponse) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateConsole(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *CreateTenantResponse) validateConsole(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.Console) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.Console != nil {
|
||||
if err := m.Console.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("console")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -61,3 +92,38 @@ func (m *CreateTenantResponse) UnmarshalBinary(b []byte) error {
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateTenantResponseConsole create tenant response console
|
||||
//
|
||||
// swagger:model CreateTenantResponseConsole
|
||||
type CreateTenantResponseConsole struct {
|
||||
|
||||
// access key
|
||||
AccessKey string `json:"access_key,omitempty"`
|
||||
|
||||
// secret key
|
||||
SecretKey string `json:"secret_key,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this create tenant response console
|
||||
func (m *CreateTenantResponseConsole) Validate(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *CreateTenantResponseConsole) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *CreateTenantResponseConsole) UnmarshalBinary(b []byte) error {
|
||||
var res CreateTenantResponseConsole
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
272
models/node_selector_term.go
Normal file
272
models/node_selector_term.go
Normal file
@@ -0,0 +1,272 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/go-openapi/validate"
|
||||
)
|
||||
|
||||
// NodeSelectorTerm A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.
|
||||
//
|
||||
// swagger:model nodeSelectorTerm
|
||||
type NodeSelectorTerm struct {
|
||||
|
||||
// A list of node selector requirements by node's labels.
|
||||
MatchExpressions []*NodeSelectorTermMatchExpressionsItems0 `json:"matchExpressions"`
|
||||
|
||||
// A list of node selector requirements by node's fields.
|
||||
MatchFields []*NodeSelectorTermMatchFieldsItems0 `json:"matchFields"`
|
||||
}
|
||||
|
||||
// Validate validates this node selector term
|
||||
func (m *NodeSelectorTerm) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateMatchExpressions(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateMatchFields(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTerm) validateMatchExpressions(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.MatchExpressions) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.MatchExpressions); i++ {
|
||||
if swag.IsZero(m.MatchExpressions[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.MatchExpressions[i] != nil {
|
||||
if err := m.MatchExpressions[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("matchExpressions" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTerm) validateMatchFields(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.MatchFields) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.MatchFields); i++ {
|
||||
if swag.IsZero(m.MatchFields[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.MatchFields[i] != nil {
|
||||
if err := m.MatchFields[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("matchFields" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *NodeSelectorTerm) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *NodeSelectorTerm) UnmarshalBinary(b []byte) error {
|
||||
var res NodeSelectorTerm
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeSelectorTermMatchExpressionsItems0 A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||
//
|
||||
// swagger:model NodeSelectorTermMatchExpressionsItems0
|
||||
type NodeSelectorTermMatchExpressionsItems0 struct {
|
||||
|
||||
// The label key that the selector applies to.
|
||||
// Required: true
|
||||
Key *string `json:"key"`
|
||||
|
||||
// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
|
||||
// Required: true
|
||||
Operator *string `json:"operator"`
|
||||
|
||||
// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
|
||||
Values []string `json:"values"`
|
||||
}
|
||||
|
||||
// Validate validates this node selector term match expressions items0
|
||||
func (m *NodeSelectorTermMatchExpressionsItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateKey(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateOperator(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTermMatchExpressionsItems0) validateKey(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("key", "body", m.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTermMatchExpressionsItems0) validateOperator(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("operator", "body", m.Operator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *NodeSelectorTermMatchExpressionsItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *NodeSelectorTermMatchExpressionsItems0) UnmarshalBinary(b []byte) error {
|
||||
var res NodeSelectorTermMatchExpressionsItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// NodeSelectorTermMatchFieldsItems0 A node selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||
//
|
||||
// swagger:model NodeSelectorTermMatchFieldsItems0
|
||||
type NodeSelectorTermMatchFieldsItems0 struct {
|
||||
|
||||
// The label key that the selector applies to.
|
||||
// Required: true
|
||||
Key *string `json:"key"`
|
||||
|
||||
// Represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist. Gt, and Lt.
|
||||
// Required: true
|
||||
Operator *string `json:"operator"`
|
||||
|
||||
// An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. If the operator is Gt or Lt, the values array must have a single element, which will be interpreted as an integer. This array is replaced during a strategic merge patch.
|
||||
Values []string `json:"values"`
|
||||
}
|
||||
|
||||
// Validate validates this node selector term match fields items0
|
||||
func (m *NodeSelectorTermMatchFieldsItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateKey(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateOperator(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTermMatchFieldsItems0) validateKey(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("key", "body", m.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *NodeSelectorTermMatchFieldsItems0) validateOperator(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("operator", "body", m.Operator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *NodeSelectorTermMatchFieldsItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *NodeSelectorTermMatchFieldsItems0) UnmarshalBinary(b []byte) error {
|
||||
var res NodeSelectorTermMatchFieldsItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
251
models/pod_affinity_term.go
Normal file
251
models/pod_affinity_term.go
Normal file
@@ -0,0 +1,251 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/go-openapi/validate"
|
||||
)
|
||||
|
||||
// PodAffinityTerm Required. A pod affinity term, associated with the corresponding weight.
|
||||
//
|
||||
// swagger:model podAffinityTerm
|
||||
type PodAffinityTerm struct {
|
||||
|
||||
// label selector
|
||||
LabelSelector *PodAffinityTermLabelSelector `json:"labelSelector,omitempty"`
|
||||
|
||||
// namespaces specifies which namespaces the labelSelector applies to (matches against); null or empty list means "this pod's namespace"
|
||||
Namespaces []string `json:"namespaces"`
|
||||
|
||||
// This pod should be co-located (affinity) or not co-located (anti-affinity) with the pods matching the labelSelector in the specified namespaces, where co-located is defined as running on a node whose value of the label with key topologyKey matches that of any node on which any of the selected pods is running. Empty topologyKey is not allowed.
|
||||
// Required: true
|
||||
TopologyKey *string `json:"topologyKey"`
|
||||
}
|
||||
|
||||
// Validate validates this pod affinity term
|
||||
func (m *PodAffinityTerm) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateLabelSelector(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateTopologyKey(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PodAffinityTerm) validateLabelSelector(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.LabelSelector) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.LabelSelector != nil {
|
||||
if err := m.LabelSelector.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("labelSelector")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PodAffinityTerm) validateTopologyKey(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("topologyKey", "body", m.TopologyKey); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *PodAffinityTerm) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *PodAffinityTerm) UnmarshalBinary(b []byte) error {
|
||||
var res PodAffinityTerm
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// PodAffinityTermLabelSelector A label query over a set of resources, in this case pods.
|
||||
//
|
||||
// swagger:model PodAffinityTermLabelSelector
|
||||
type PodAffinityTermLabelSelector struct {
|
||||
|
||||
// matchExpressions is a list of label selector requirements. The requirements are ANDed.
|
||||
MatchExpressions []*PodAffinityTermLabelSelectorMatchExpressionsItems0 `json:"matchExpressions"`
|
||||
|
||||
// matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
|
||||
MatchLabels map[string]string `json:"matchLabels,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this pod affinity term label selector
|
||||
func (m *PodAffinityTermLabelSelector) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateMatchExpressions(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PodAffinityTermLabelSelector) validateMatchExpressions(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.MatchExpressions) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.MatchExpressions); i++ {
|
||||
if swag.IsZero(m.MatchExpressions[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.MatchExpressions[i] != nil {
|
||||
if err := m.MatchExpressions[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("labelSelector" + "." + "matchExpressions" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *PodAffinityTermLabelSelector) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *PodAffinityTermLabelSelector) UnmarshalBinary(b []byte) error {
|
||||
var res PodAffinityTermLabelSelector
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// PodAffinityTermLabelSelectorMatchExpressionsItems0 A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
|
||||
//
|
||||
// swagger:model PodAffinityTermLabelSelectorMatchExpressionsItems0
|
||||
type PodAffinityTermLabelSelectorMatchExpressionsItems0 struct {
|
||||
|
||||
// key is the label key that the selector applies to.
|
||||
// Required: true
|
||||
Key *string `json:"key"`
|
||||
|
||||
// operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
|
||||
// Required: true
|
||||
Operator *string `json:"operator"`
|
||||
|
||||
// values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
|
||||
Values []string `json:"values"`
|
||||
}
|
||||
|
||||
// Validate validates this pod affinity term label selector match expressions items0
|
||||
func (m *PodAffinityTermLabelSelectorMatchExpressionsItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateKey(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateOperator(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PodAffinityTermLabelSelectorMatchExpressionsItems0) validateKey(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("key", "body", m.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *PodAffinityTermLabelSelectorMatchExpressionsItems0) validateOperator(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("operator", "body", m.Operator); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *PodAffinityTermLabelSelectorMatchExpressionsItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *PodAffinityTermLabelSelectorMatchExpressionsItems0) UnmarshalBinary(b []byte) error {
|
||||
var res PodAffinityTermLabelSelectorMatchExpressionsItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
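The new podAffinityTerm model marks topologyKey as required, so the generated Validate rejects a term without it. A minimal sketch (assuming the generated package is imported as github.com/minio/console/models; the hostname key is only an illustrative value):

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/minio/console/models"
)

func main() {
	term := &models.PodAffinityTerm{} // topologyKey is nil here
	if err := term.Validate(strfmt.Default); err != nil {
		fmt.Println(err) // reports the missing required topologyKey
	}

	// Illustrative value; any non-empty topology key satisfies the model.
	term.TopologyKey = swag.String("kubernetes.io/hostname")
	fmt.Println(term.Validate(strfmt.Default)) // <nil>
}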
@@ -53,9 +53,6 @@ type Tenant struct {
|
||||
// total size
|
||||
TotalSize int64 `json:"total_size,omitempty"`
|
||||
|
||||
// used size
|
||||
UsedSize int64 `json:"used_size,omitempty"`
|
||||
|
||||
// zones
|
||||
Zones []*Zone `json:"zones"`
|
||||
}
|
||||
|
||||
@@ -47,12 +47,12 @@ type TenantList struct {
|
||||
// namespace
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
|
||||
// total size
|
||||
TotalSize int64 `json:"total_size,omitempty"`
|
||||
|
||||
// volume count
|
||||
VolumeCount int64 `json:"volume_count,omitempty"`
|
||||
|
||||
// volume size
|
||||
VolumeSize int64 `json:"volume_size,omitempty"`
|
||||
|
||||
// zone count
|
||||
ZoneCount int64 `json:"zone_count,omitempty"`
|
||||
}
|
||||
|
||||
60
models/tenant_usage.go
Normal file
@@ -0,0 +1,60 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// TenantUsage tenant usage
|
||||
//
|
||||
// swagger:model tenantUsage
|
||||
type TenantUsage struct {
|
||||
|
||||
// used size
|
||||
UsedSize int64 `json:"used_size,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this tenant usage
|
||||
func (m *TenantUsage) Validate(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *TenantUsage) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *TenantUsage) UnmarshalBinary(b []byte) error {
|
||||
var res TenantUsage
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
@@ -34,13 +34,25 @@ import (
|
||||
// swagger:model zone
|
||||
type Zone struct {
|
||||
|
||||
// affinity
|
||||
Affinity *ZoneAffinity `json:"affinity,omitempty"`
|
||||
|
||||
// name
|
||||
Name string `json:"name,omitempty"`
|
||||
|
||||
// NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
|
||||
NodeSelector map[string]string `json:"node_selector,omitempty"`
|
||||
|
||||
// resources
|
||||
Resources *ZoneResources `json:"resources,omitempty"`
|
||||
|
||||
// servers
|
||||
// Required: true
|
||||
Servers *int64 `json:"servers"`
|
||||
|
||||
// tolerations
|
||||
Tolerations ZoneTolerations `json:"tolerations,omitempty"`
|
||||
|
||||
// volume configuration
|
||||
// Required: true
|
||||
VolumeConfiguration *ZoneVolumeConfiguration `json:"volume_configuration"`
|
||||
@@ -54,10 +66,22 @@ type Zone struct {
|
||||
func (m *Zone) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateAffinity(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateResources(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateServers(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateTolerations(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateVolumeConfiguration(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
@@ -72,6 +96,42 @@ func (m *Zone) Validate(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zone) validateAffinity(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.Affinity) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.Affinity != nil {
|
||||
if err := m.Affinity.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("affinity")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zone) validateResources(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.Resources) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.Resources != nil {
|
||||
if err := m.Resources.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("resources")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zone) validateServers(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("servers", "body", m.Servers); err != nil {
|
||||
@@ -81,6 +141,22 @@ func (m *Zone) validateServers(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zone) validateTolerations(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.Tolerations) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if err := m.Tolerations.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("tolerations")
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *Zone) validateVolumeConfiguration(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("volume_configuration", "body", m.VolumeConfiguration); err != nil {
|
||||
|
||||
726
models/zone_affinity.go
Normal file
@@ -0,0 +1,726 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/go-openapi/validate"
|
||||
)
|
||||
|
||||
// ZoneAffinity If specified, affinity will define the pod's scheduling constraints
|
||||
//
|
||||
// swagger:model zoneAffinity
|
||||
type ZoneAffinity struct {
|
||||
|
||||
// node affinity
|
||||
NodeAffinity *ZoneAffinityNodeAffinity `json:"nodeAffinity,omitempty"`
|
||||
|
||||
// pod affinity
|
||||
PodAffinity *ZoneAffinityPodAffinity `json:"podAffinity,omitempty"`
|
||||
|
||||
// pod anti affinity
|
||||
PodAntiAffinity *ZoneAffinityPodAntiAffinity `json:"podAntiAffinity,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity
|
||||
func (m *ZoneAffinity) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateNodeAffinity(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validatePodAffinity(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validatePodAntiAffinity(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinity) validateNodeAffinity(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.NodeAffinity) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.NodeAffinity != nil {
|
||||
if err := m.NodeAffinity.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("nodeAffinity")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinity) validatePodAffinity(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.PodAffinity) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.PodAffinity != nil {
|
||||
if err := m.PodAffinity.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAffinity")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinity) validatePodAntiAffinity(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.PodAntiAffinity) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.PodAntiAffinity != nil {
|
||||
if err := m.PodAntiAffinity.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAntiAffinity")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinity) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinity) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinity
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityNodeAffinity Describes node affinity scheduling rules for the pod.
|
||||
//
|
||||
// swagger:model ZoneAffinityNodeAffinity
|
||||
type ZoneAffinityNodeAffinity struct {
|
||||
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"`
|
||||
|
||||
// required during scheduling ignored during execution
|
||||
RequiredDuringSchedulingIgnoredDuringExecution *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity node affinity
|
||||
func (m *ZoneAffinityNodeAffinity) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateRequiredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityNodeAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.PreferredDuringSchedulingIgnoredDuringExecution); i++ {
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.PreferredDuringSchedulingIgnoredDuringExecution[i] != nil {
|
||||
if err := m.PreferredDuringSchedulingIgnoredDuringExecution[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("nodeAffinity" + "." + "preferredDuringSchedulingIgnoredDuringExecution" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityNodeAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
if m.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
if err := m.RequiredDuringSchedulingIgnoredDuringExecution.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("nodeAffinity" + "." + "requiredDuringSchedulingIgnoredDuringExecution")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinity) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinity) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityNodeAffinity
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).
|
||||
//
|
||||
// swagger:model ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
type ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct {
|
||||
|
||||
// A node selector term, associated with the corresponding weight.
|
||||
// Required: true
|
||||
Preference *NodeSelectorTerm `json:"preference"`
|
||||
|
||||
// Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.
|
||||
// Required: true
|
||||
Weight *int32 `json:"weight"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity node affinity preferred during scheduling ignored during execution items0
|
||||
func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePreference(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateWeight(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePreference(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("preference", "body", m.Preference); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.Preference != nil {
|
||||
if err := m.Preference.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("preference")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("weight", "body", m.Weight); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.
|
||||
//
|
||||
// swagger:model ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution
|
||||
type ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution struct {
|
||||
|
||||
// Required. A list of node selector terms. The terms are ORed.
|
||||
// Required: true
|
||||
NodeSelectorTerms []*NodeSelectorTerm `json:"nodeSelectorTerms"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity node affinity required during scheduling ignored during execution
|
||||
func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validateNodeSelectorTerms(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) validateNodeSelectorTerms(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("nodeAffinity"+"."+"requiredDuringSchedulingIgnoredDuringExecution"+"."+"nodeSelectorTerms", "body", m.NodeSelectorTerms); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.NodeSelectorTerms); i++ {
|
||||
if swag.IsZero(m.NodeSelectorTerms[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.NodeSelectorTerms[i] != nil {
|
||||
if err := m.NodeSelectorTerms[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("nodeAffinity" + "." + "requiredDuringSchedulingIgnoredDuringExecution" + "." + "nodeSelectorTerms" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityPodAffinity Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).
|
||||
//
|
||||
// swagger:model ZoneAffinityPodAffinity
|
||||
type ZoneAffinityPodAffinity struct {
|
||||
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"`
|
||||
|
||||
// If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []*PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity pod affinity
|
||||
func (m *ZoneAffinityPodAffinity) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateRequiredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.PreferredDuringSchedulingIgnoredDuringExecution); i++ {
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.PreferredDuringSchedulingIgnoredDuringExecution[i] != nil {
|
||||
if err := m.PreferredDuringSchedulingIgnoredDuringExecution[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAffinity" + "." + "preferredDuringSchedulingIgnoredDuringExecution" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.RequiredDuringSchedulingIgnoredDuringExecution); i++ {
|
||||
if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.RequiredDuringSchedulingIgnoredDuringExecution[i] != nil {
|
||||
if err := m.RequiredDuringSchedulingIgnoredDuringExecution[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAffinity" + "." + "requiredDuringSchedulingIgnoredDuringExecution" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAffinity) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAffinity) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityPodAffinity
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
//
|
||||
// swagger:model ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
type ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct {
|
||||
|
||||
// pod affinity term
|
||||
// Required: true
|
||||
PodAffinityTerm *PodAffinityTerm `json:"podAffinityTerm"`
|
||||
|
||||
// weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
|
||||
// Required: true
|
||||
Weight *int32 `json:"weight"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity pod affinity preferred during scheduling ignored during execution items0
|
||||
func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePodAffinityTerm(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateWeight(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("podAffinityTerm", "body", m.PodAffinityTerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.PodAffinityTerm != nil {
|
||||
if err := m.PodAffinityTerm.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAffinityTerm")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("weight", "body", m.Weight); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityPodAntiAffinity Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).
|
||||
//
|
||||
// swagger:model ZoneAffinityPodAntiAffinity
|
||||
type ZoneAffinityPodAntiAffinity struct {
|
||||
|
||||
// The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.
|
||||
PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"`
|
||||
|
||||
// If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.
|
||||
RequiredDuringSchedulingIgnoredDuringExecution []*PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity pod anti affinity
|
||||
func (m *ZoneAffinityPodAntiAffinity) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateRequiredDuringSchedulingIgnoredDuringExecution(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAntiAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.PreferredDuringSchedulingIgnoredDuringExecution); i++ {
|
||||
if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.PreferredDuringSchedulingIgnoredDuringExecution[i] != nil {
|
||||
if err := m.PreferredDuringSchedulingIgnoredDuringExecution[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAntiAffinity" + "." + "preferredDuringSchedulingIgnoredDuringExecution" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAntiAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error {
|
||||
|
||||
if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required
|
||||
return nil
|
||||
}
|
||||
|
||||
for i := 0; i < len(m.RequiredDuringSchedulingIgnoredDuringExecution); i++ {
|
||||
if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m.RequiredDuringSchedulingIgnoredDuringExecution[i] != nil {
|
||||
if err := m.RequiredDuringSchedulingIgnoredDuringExecution[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAntiAffinity" + "." + "requiredDuringSchedulingIgnoredDuringExecution" + "." + strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAntiAffinity) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAntiAffinity) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityPodAntiAffinity
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)
|
||||
//
|
||||
// swagger:model ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
type ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct {
|
||||
|
||||
// pod affinity term
|
||||
// Required: true
|
||||
PodAffinityTerm *PodAffinityTerm `json:"podAffinityTerm"`
|
||||
|
||||
// weight associated with matching the corresponding podAffinityTerm, in the range 1-100.
|
||||
// Required: true
|
||||
Weight *int32 `json:"weight"`
|
||||
}
|
||||
|
||||
// Validate validates this zone affinity pod anti affinity preferred during scheduling ignored during execution items0
|
||||
func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
if err := m.validatePodAffinityTerm(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if err := m.validateWeight(formats); err != nil {
|
||||
res = append(res, err)
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("podAffinityTerm", "body", m.PodAffinityTerm); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if m.PodAffinityTerm != nil {
|
||||
if err := m.PodAffinityTerm.Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName("podAffinityTerm")
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error {
|
||||
|
||||
if err := validate.Required("weight", "body", m.Weight); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
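The zoneAffinity model mirrors the Kubernetes affinity API: each preferred pod anti-affinity entry carries a weight in the 1-100 range plus a required podAffinityTerm. A hedged sketch of building such a rule with the generated types (the app=minio label and the hostname topology key are illustrative, not values the console mandates):

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"

	"github.com/minio/console/models"
)

func main() {
	// Soft rule: prefer spreading pods carrying the (illustrative) app=minio
	// label across distinct hostnames.
	affinity := &models.ZoneAffinity{
		PodAntiAffinity: &models.ZoneAffinityPodAntiAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []*models.ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
				{
					Weight: swag.Int32(100), // weight is documented as 1-100
					PodAffinityTerm: &models.PodAffinityTerm{
						TopologyKey: swag.String("kubernetes.io/hostname"),
						LabelSelector: &models.PodAffinityTermLabelSelector{
							MatchLabels: map[string]string{"app": "minio"},
						},
					},
				},
			},
		},
	}
	if err := affinity.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid affinity:", err)
	}
}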
63
models/zone_resources.go
Normal file
@@ -0,0 +1,63 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// ZoneResources If provided, use these requests and limit for cpu/memory resource allocation
|
||||
//
|
||||
// swagger:model zoneResources
|
||||
type ZoneResources struct {
|
||||
|
||||
// Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
Limits map[string]int64 `json:"limits,omitempty"`
|
||||
|
||||
// Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/
|
||||
Requests map[string]int64 `json:"requests,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this zone resources
|
||||
func (m *ZoneResources) Validate(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneResources) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneResources) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneResources
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
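zoneResources only carries two maps of resource name to integer quantity and performs no validation of its own. A small sketch of populating and serializing it (treating the memory values as byte counts is an assumption about how callers interpret them, not something the model enforces):

package main

import (
	"fmt"

	"github.com/minio/console/models"
)

func main() {
	// Assumption: memory quantities expressed as byte counts.
	res := &models.ZoneResources{
		Requests: map[string]int64{"memory": 2 * 1024 * 1024 * 1024},
		Limits:   map[string]int64{"memory": 4 * 1024 * 1024 * 1024},
	}

	b, err := res.MarshalBinary() // swag.WriteJSON under the hood
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"limits":{"memory":4294967296},"requests":{"memory":2147483648}}
}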
106
models/zone_tolerations.go
Normal file
@@ -0,0 +1,106 @@
|
||||
// Code generated by go-swagger; DO NOT EDIT.
|
||||
|
||||
// This file is part of MinIO Console Server
|
||||
// Copyright (c) 2020 MinIO, Inc.
|
||||
//
|
||||
// This program is free software: you can redistribute it and/or modify
|
||||
// it under the terms of the GNU Affero General Public License as published by
|
||||
// the Free Software Foundation, either version 3 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU Affero General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU Affero General Public License
|
||||
// along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
//
|
||||
|
||||
package models
|
||||
|
||||
// This file was generated by the swagger tool.
|
||||
// Editing this file might prove futile when you re-run the swagger generate command
|
||||
|
||||
import (
|
||||
"strconv"
|
||||
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/strfmt"
|
||||
"github.com/go-openapi/swag"
|
||||
)
|
||||
|
||||
// ZoneTolerations Tolerations allows users to set entries like effect, key, operator, value.
|
||||
//
|
||||
// swagger:model zoneTolerations
|
||||
type ZoneTolerations []*ZoneTolerationsItems0
|
||||
|
||||
// Validate validates this zone tolerations
|
||||
func (m ZoneTolerations) Validate(formats strfmt.Registry) error {
|
||||
var res []error
|
||||
|
||||
for i := 0; i < len(m); i++ {
|
||||
if swag.IsZero(m[i]) { // not required
|
||||
continue
|
||||
}
|
||||
|
||||
if m[i] != nil {
|
||||
if err := m[i].Validate(formats); err != nil {
|
||||
if ve, ok := err.(*errors.Validation); ok {
|
||||
return ve.ValidateName(strconv.Itoa(i))
|
||||
}
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(res) > 0 {
|
||||
return errors.CompositeValidationError(res...)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ZoneTolerationsItems0 The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.
|
||||
//
|
||||
// swagger:model ZoneTolerationsItems0
|
||||
type ZoneTolerationsItems0 struct {
|
||||
|
||||
// Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.
|
||||
Effect string `json:"effect,omitempty"`
|
||||
|
||||
// Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.
|
||||
Key string `json:"key,omitempty"`
|
||||
|
||||
// Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.
|
||||
Operator string `json:"operator,omitempty"`
|
||||
|
||||
// TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.
|
||||
TolerationSeconds int64 `json:"tolerationSeconds,omitempty"`
|
||||
|
||||
// Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.
|
||||
Value string `json:"value,omitempty"`
|
||||
}
|
||||
|
||||
// Validate validates this zone tolerations items0
|
||||
func (m *ZoneTolerationsItems0) Validate(formats strfmt.Registry) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// MarshalBinary interface implementation
|
||||
func (m *ZoneTolerationsItems0) MarshalBinary() ([]byte, error) {
|
||||
if m == nil {
|
||||
return nil, nil
|
||||
}
|
||||
return swag.WriteJSON(m)
|
||||
}
|
||||
|
||||
// UnmarshalBinary interface implementation
|
||||
func (m *ZoneTolerationsItems0) UnmarshalBinary(b []byte) error {
|
||||
var res ZoneTolerationsItems0
|
||||
if err := swag.ReadJSON(b, &res); err != nil {
|
||||
return err
|
||||
}
|
||||
*m = res
|
||||
return nil
|
||||
}
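Because zoneTolerations is a slice type with its own Validate, a toleration list can be built and checked directly. A short illustrative sketch (the dedicated=minio:NoSchedule taint is hypothetical):

package main

import (
	"fmt"

	"github.com/go-openapi/strfmt"

	"github.com/minio/console/models"
)

func main() {
	// Tolerate a hypothetical dedicated=minio:NoSchedule taint.
	tolerations := models.ZoneTolerations{
		{
			Key:      "dedicated",
			Operator: "Equal",
			Value:    "minio",
			Effect:   "NoSchedule",
		},
	}
	if err := tolerations.Validate(strfmt.Default); err != nil {
		fmt.Println("invalid tolerations:", err)
	}
}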
@@ -22,7 +22,7 @@ import (
"github.com/minio/minio/pkg/env"
)

// GetOperatorMode gets MCS Operator mode status set on env variable or default one
// GetOperatorMode gets Console Operator mode status set on env variable or default one
func GetOperatorMode() bool {
return strings.ToLower(env.Get(mcsOperatorMode, "off")) == "on"
return strings.ToLower(env.Get(consoleOperatorMode, "off")) == "on"
}

@@ -17,5 +17,5 @@
package acl

const (
mcsOperatorMode = "MCS_OPERATOR_MODE"
consoleOperatorMode = "CONSOLE_OPERATOR_MODE"
)

||||
@@ -19,7 +19,7 @@ package auth
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/minio/mcs/pkg/auth/idp/oauth2"
|
||||
"github.com/minio/console/pkg/auth/idp/oauth2"
|
||||
)
|
||||
|
||||
// IdentityProviderClient interface with all functions to be implemented
|
||||
|
||||
@@ -19,29 +19,29 @@
|
||||
package oauth2
|
||||
|
||||
import (
|
||||
"github.com/minio/mcs/pkg/auth/utils"
|
||||
"github.com/minio/console/pkg/auth/utils"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
func GetIdpURL() string {
|
||||
return env.Get(McsIdpURL, "")
|
||||
return env.Get(ConsoleIdpURL, "")
|
||||
}
|
||||
|
||||
func GetIdpClientID() string {
|
||||
return env.Get(McsIdpClientID, "")
|
||||
return env.Get(ConsoleIdpClientID, "")
|
||||
}
|
||||
|
||||
func GetIdpSecret() string {
|
||||
return env.Get(McsIdpSecret, "")
|
||||
return env.Get(ConsoleIdpSecret, "")
|
||||
}
|
||||
|
||||
// Public endpoint used by the identity oidcProvider when redirecting the user after identity verification
|
||||
func GetIdpCallbackURL() string {
|
||||
return env.Get(McsIdpCallbackURL, "")
|
||||
return env.Get(ConsoleIdpCallbackURL, "")
|
||||
}
|
||||
|
||||
func GetIdpAdminRoles() string {
|
||||
return env.Get(McsIdpAdminRoles, "")
|
||||
return env.Get(ConsoleIdpAdminRoles, "")
|
||||
}
|
||||
|
||||
func IsIdpEnabled() bool {
|
||||
@@ -55,17 +55,17 @@ var defaultPassphraseForIdpHmac = utils.RandomCharString(64)
|
||||
|
||||
// getPassphraseForIdpHmac returns the passphrase for the pbkdf2 function used to sign the oauth2 state parameter
|
||||
func getPassphraseForIdpHmac() string {
|
||||
return env.Get(McsIdpHmacPassphrase, defaultPassphraseForIdpHmac)
|
||||
return env.Get(ConsoleIdpHmacPassphrase, defaultPassphraseForIdpHmac)
|
||||
}
|
||||
|
||||
var defaultSaltForIdpHmac = utils.RandomCharString(64)
|
||||
|
||||
// getSaltForIdpHmac returns the salt for the pbkdf2 function used to sign the oauth2 state parameter
|
||||
func getSaltForIdpHmac() string {
|
||||
return env.Get(McsIdpHmacSalt, defaultSaltForIdpHmac)
|
||||
return env.Get(ConsoleIdpHmacSalt, defaultSaltForIdpHmac)
|
||||
}
|
||||
|
||||
// GetIDPPolicyForUser returns the policy to be assigned to users authenticating via an IDP
|
||||
func GetIDPPolicyForUser() string {
|
||||
return env.Get(McsIdpPolicyUser, "mcsAdmin")
|
||||
return env.Get(ConsoleIdpPolicyUser, "consoleAdmin")
|
||||
}
|
||||
|
||||
@@ -18,12 +18,12 @@ package oauth2

const (
// const for idp configuration
McsIdpURL = "MCS_IDP_URL"
McsIdpClientID = "MCS_IDP_CLIENT_ID"
McsIdpSecret = "MCS_IDP_SECRET"
McsIdpCallbackURL = "MCS_IDP_CALLBACK"
McsIdpAdminRoles = "MCS_IDP_ADMIN_ROLES"
McsIdpHmacPassphrase = "MCS_IDP_HMAC_PASSPHRASE"
McsIdpHmacSalt = "MCS_IDP_HMAC_SALT"
McsIdpPolicyUser = "MCS_IDP_POLICY_USER"
ConsoleIdpURL = "CONSOLE_IDP_URL"
ConsoleIdpClientID = "CONSOLE_IDP_CLIENT_ID"
ConsoleIdpSecret = "CONSOLE_IDP_SECRET"
ConsoleIdpCallbackURL = "CONSOLE_IDP_CALLBACK"
ConsoleIdpAdminRoles = "CONSOLE_IDP_ADMIN_ROLES"
ConsoleIdpHmacPassphrase = "CONSOLE_IDP_HMAC_PASSPHRASE"
ConsoleIdpHmacSalt = "CONSOLE_IDP_HMAC_SALT"
ConsoleIdpPolicyUser = "CONSOLE_IDP_POLICY_USER"
)
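These constants are only the environment variable names; the getters earlier in this diff (GetIdpURL, GetIdpClientID, and friends) resolve them through minio's env package, which as far as those getters suggest reads the process environment. A hedged sketch of wiring them up from Go, for example in tooling or a test (all endpoint and credential values are hypothetical placeholders):

package main

import (
	"fmt"
	"os"

	"github.com/minio/console/pkg/auth/idp/oauth2"
)

func main() {
	// All values below are hypothetical placeholders.
	os.Setenv(oauth2.ConsoleIdpURL, "https://idp.example.com/.well-known/openid-configuration")
	os.Setenv(oauth2.ConsoleIdpClientID, "console-client")
	os.Setenv(oauth2.ConsoleIdpSecret, "console-secret")
	os.Setenv(oauth2.ConsoleIdpCallbackURL, "https://console.example.com/oauth_callback")

	fmt.Println(oauth2.GetIdpURL())      // CONSOLE_IDP_URL
	fmt.Println(oauth2.GetIdpClientID()) // CONSOLE_IDP_CLIENT_ID
}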
|
|
||||
"strings"
|
||||
|
||||
"github.com/coreos/go-oidc"
|
||||
"github.com/minio/mcs/pkg/auth/utils"
|
||||
"github.com/minio/console/pkg/auth/utils"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
xoauth2 "golang.org/x/oauth2"
|
||||
)
|
||||
@@ -94,7 +94,7 @@ type Provider struct {
|
||||
}
|
||||
|
||||
// derivedKey is the key used to compute the HMAC for signing the oauth state parameter
|
||||
// its derived using pbkdf on MCS_IDP_HMAC_PASSPHRASE with MCS_IDP_HMAC_SALT
|
||||
// it's derived using pbkdf2 on CONSOLE_IDP_HMAC_PASSPHRASE with CONSOLE_IDP_HMAC_SALT
|
||||
var derivedKey = pbkdf2.Key([]byte(getPassphraseForIdpHmac()), []byte(getSaltForIdpHmac()), 4096, 32, sha1.New)
|
||||
|
||||
// NewOauth2ProviderClient instantiates a new oauth2 client using the configured credentials
|
||||
@@ -186,7 +186,7 @@ func (client *Provider) VerifyIdentity(ctx context.Context, code, state string)
|
||||
}
|
||||
|
||||
// validateOauth2State validates the provided state was originated using the same
|
||||
// instance (or one configured using the same secrets) of MCS, this is basically used to prevent CSRF attacks
|
||||
// instance (or one configured using the same secrets) of Console, this is basically used to prevent CSRF attacks
|
||||
// https://security.stackexchange.com/questions/20187/oauth2-cross-site-request-forgery-and-state-parameter
|
||||
func validateOauth2State(state string) bool {
|
||||
// state contains a base64 encoded string that may end with "=="; the browser encodes that to "%3D%3D"
|
||||
|
||||
@@ -32,8 +32,8 @@ import (
|
||||
|
||||
jwtgo "github.com/dgrijalva/jwt-go"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
xjwt "github.com/minio/mcs/pkg/auth/jwt"
|
||||
"github.com/minio/console/models"
|
||||
xjwt "github.com/minio/console/pkg/auth/jwt"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
uuid "github.com/satori/go.uuid"
|
||||
"golang.org/x/crypto/pbkdf2"
|
||||
@@ -46,7 +46,7 @@ var (
|
||||
errClaimsFormat = errors.New("encrypted jwt claims not in the right format")
|
||||
)
|
||||
|
||||
// derivedKey is the key used to encrypt the JWT claims, its derived using pbkdf on MCS_PBKDF_PASSPHRASE with MCS_PBKDF_SALT
|
||||
// derivedKey is the key used to encrypt the JWT claims; it's derived using pbkdf2 on CONSOLE_PBKDF_PASSPHRASE with CONSOLE_PBKDF_SALT
|
||||
var derivedKey = pbkdf2.Key([]byte(xjwt.GetPBKDFPassphrase()), []byte(xjwt.GetPBKDFSalt()), 4096, 32, sha1.New)
|
||||
|
||||
// IsJWTValid returns true or false depending if the provided jwt is valid or not
|
||||
@@ -104,7 +104,7 @@ func NewJWTWithClaimsForClient(credentials *credentials.Value, actions []string,
|
||||
return "", err
|
||||
}
|
||||
claims := xjwt.NewStandardClaims()
|
||||
claims.SetExpiry(time.Now().UTC().Add(xjwt.GetMcsSTSAndJWTDurationTime()))
|
||||
claims.SetExpiry(time.Now().UTC().Add(xjwt.GetConsoleSTSAndJWTDurationTime()))
|
||||
claims.SetSubject(uuid.NewV4().String())
|
||||
claims.SetData(encryptedClaims)
|
||||
claims.SetAudience(audience)
|
||||
@@ -216,7 +216,7 @@ func GetClaimsFromTokenInRequest(req *http.Request) (*models.Principal, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// Perform decryption of the JWT, if MCS is able to decrypt the JWT that means a valid session
|
||||
// Perform decryption of the JWT, if Console is able to decrypt the JWT that means a valid session
|
||||
// was used in the first place to get it
|
||||
claims, err := JWTAuthenticate(*sessionID)
|
||||
if err != nil {
|
||||
|
||||
@@ -20,31 +20,31 @@ import (
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/minio/mcs/pkg/auth/utils"
|
||||
"github.com/minio/console/pkg/auth/utils"
|
||||
"github.com/minio/minio/pkg/env"
|
||||
)
|
||||
|
||||
// defaultHmacJWTPassphrase will be used by default if application is not configured with a custom MCS_HMAC_JWT_SECRET secret
|
||||
// defaultHmacJWTPassphrase will be used by default if application is not configured with a custom CONSOLE_HMAC_JWT_SECRET secret
|
||||
var defaultHmacJWTPassphrase = utils.RandomCharString(64)
|
||||
|
||||
// GetHmacJWTSecret returns the 64 bytes secret used for signing the generated JWT for the application
|
||||
func GetHmacJWTSecret() string {
|
||||
return env.Get(McsHmacJWTSecret, defaultHmacJWTPassphrase)
|
||||
return env.Get(ConsoleHmacJWTSecret, defaultHmacJWTPassphrase)
|
||||
}
|
||||
|
||||
// McsSTSAndJWTDurationSeconds returns the default session duration for the STS requested tokens and the generated JWTs.
|
||||
// ConsoleSTSAndJWTDurationSeconds returns the default session duration for the STS requested tokens and the generated JWTs.
|
||||
// Ideally both values should match so jwt and Minio sts sessions expires at the same time.
|
||||
func GetMcsSTSAndJWTDurationInSeconds() int {
|
||||
duration, err := strconv.Atoi(env.Get(McsSTSAndJWTDurationSeconds, "3600"))
|
||||
func GetConsoleSTSAndJWTDurationInSeconds() int {
|
||||
duration, err := strconv.Atoi(env.Get(ConsoleSTSAndJWTDurationSeconds, "3600"))
|
||||
if err != nil {
|
||||
duration = 3600
|
||||
}
|
||||
return duration
|
||||
}
|
||||
|
||||
// GetMcsSTSAndJWTDurationTime returns GetMcsSTSAndJWTDurationInSeconds in duration format
|
||||
func GetMcsSTSAndJWTDurationTime() time.Duration {
|
||||
duration := GetMcsSTSAndJWTDurationInSeconds()
|
||||
// GetConsoleSTSAndJWTDurationTime returns GetConsoleSTSAndJWTDurationInSeconds in duration format
|
||||
func GetConsoleSTSAndJWTDurationTime() time.Duration {
|
||||
duration := GetConsoleSTSAndJWTDurationInSeconds()
|
||||
return time.Duration(duration) * time.Second
|
||||
}
|
||||
|
||||
@@ -52,12 +52,12 @@ var defaultPBKDFPassphrase = utils.RandomCharString(64)
|
||||
|
||||
// GetPBKDFPassphrase returns passphrase for the pbkdf2 function used to encrypt JWT payload
|
||||
func GetPBKDFPassphrase() string {
|
||||
return env.Get(McsPBKDFPassphrase, defaultPBKDFPassphrase)
|
||||
return env.Get(ConsolePBKDFPassphrase, defaultPBKDFPassphrase)
|
||||
}
|
||||
|
||||
var defaultPBKDFSalt = utils.RandomCharString(64)
|
||||
|
||||
// GetPBKDFSalt returns salt for the pbkdf2 function used to encrypt JWT payload
|
||||
func GetPBKDFSalt() string {
|
||||
return env.Get(McsPBKDFSalt, defaultPBKDFSalt)
|
||||
return env.Get(ConsolePBKDFSalt, defaultPBKDFSalt)
|
||||
}
|
||||
|
||||
@@ -17,8 +17,8 @@
package jwt

const (
McsHmacJWTSecret = "MCS_HMAC_JWT_SECRET"
McsSTSAndJWTDurationSeconds = "MCS_STS_AND_JWT_DURATION_SECONDS"
McsPBKDFPassphrase = "MCS_PBKDF_PASSPHRASE"
McsPBKDFSalt = "MCS_PBKDF_SALT"
ConsoleHmacJWTSecret = "CONSOLE_HMAC_JWT_SECRET"
ConsoleSTSAndJWTDurationSeconds = "CONSOLE_STS_AND_JWT_DURATION_SECONDS"
ConsolePBKDFPassphrase = "CONSOLE_PBKDF_PASSPHRASE"
ConsolePBKDFSalt = "CONSOLE_PBKDF_SALT"
)

@@ -27,9 +27,9 @@ var (
|
||||
errInvalidCredentials = errors.New("invalid Credentials")
|
||||
)
|
||||
|
||||
// GetMcsCredentialsFromLDAP authenticates the user against MinIO when the LDAP integration is enabled
|
||||
// GetConsoleCredentialsFromLDAP authenticates the user against MinIO when the LDAP integration is enabled
|
||||
// if the authentication succeed *credentials.Credentials object is returned and we continue with the normal STSAssumeRole flow
|
||||
func GetMcsCredentialsFromLDAP(endpoint, ldapUser, ldapPassword string) (*credentials.Credentials, error) {
|
||||
func GetConsoleCredentialsFromLDAP(endpoint, ldapUser, ldapPassword string) (*credentials.Credentials, error) {
|
||||
creds, err := credentials.NewLDAPIdentity(endpoint, ldapUser, ldapPassword)
|
||||
if err != nil {
|
||||
log.Println("LDAP authentication error: ", err)
|
||||
|
||||
@@ -23,5 +23,5 @@ import (
|
||||
)
|
||||
|
||||
func GetLDAPEnabled() bool {
|
||||
return strings.ToLower(env.Get(MCSLDAPEnabled, "off")) == "on"
|
||||
return strings.ToLower(env.Get(ConsoleLDAPEnabled, "off")) == "on"
|
||||
}
|
||||
|
||||
@@ -18,5 +18,5 @@ package ldap
|
||||
|
||||
const (
|
||||
// const for ldap configuration
|
||||
MCSLDAPEnabled = "MCS_LDAP_ENABLED"
|
||||
ConsoleLDAPEnabled = "CONSOLE_LDAP_ENABLED"
|
||||
)
|
||||
|
||||
@@ -20,7 +20,7 @@ import (
|
||||
"context"
|
||||
"log"
|
||||
|
||||
"github.com/minio/mcs/cluster"
|
||||
"github.com/minio/console/cluster"
|
||||
"github.com/minio/minio-go/v7/pkg/credentials"
|
||||
operatorClientset "github.com/minio/operator/pkg/client/clientset/versioned"
|
||||
)
|
||||
@@ -30,7 +30,7 @@ type operatorCredentialsProvider struct {
|
||||
serviceAccountJWT string
|
||||
}
|
||||
|
||||
// Implementing the interfaces of the minio Provider, we use this to leverage on the existing mcs Authentication flow
|
||||
// Implementing the interfaces of the minio Provider, we use this to leverage on the existing console Authentication flow
|
||||
func (s operatorCredentialsProvider) Retrieve() (credentials.Value, error) {
|
||||
return credentials.Value{
|
||||
AccessKeyID: "",
|
||||
@@ -65,7 +65,7 @@ func (c *operatorClient) Authenticate(ctx context.Context) ([]byte, error) {
|
||||
}
|
||||
|
||||
// isServiceAccountTokenValid will make an authenticated request against kubernetes api, if the
|
||||
// request success means the provided jwt its a valid service account token and the MCS user can use it for future
|
||||
// request success means the provided jwt its a valid service account token and the console user can use it for future
|
||||
// requests until it expires
|
||||
func isServiceAccountTokenValid(ctx context.Context, operatorClient OperatorClient) bool {
|
||||
_, err := operatorClient.Authenticate(ctx)
|
||||
@@ -76,8 +76,8 @@ func isServiceAccountTokenValid(ctx context.Context, operatorClient OperatorClie
|
||||
return true
|
||||
}
|
||||
|
||||
// GetMcsCredentialsForOperator will validate the provided JWT (service account token) and return it in the form of credentials.Credentials
|
||||
func GetMcsCredentialsForOperator(jwt string) (*credentials.Credentials, error) {
|
||||
// GetConsoleCredentialsForOperator will validate the provided JWT (service account token) and return it in the form of credentials.Credentials
|
||||
func GetConsoleCredentialsForOperator(jwt string) (*credentials.Credentials, error) {
|
||||
ctx := context.Background()
|
||||
opClientClientSet, err := cluster.OperatorClient(jwt)
|
||||
if err != nil {
|
||||
|
||||
@@ -5,7 +5,7 @@ import (
|
||||
"errors"
|
||||
"testing"
|
||||
|
||||
"github.com/minio/mcs/cluster"
|
||||
"github.com/minio/console/cluster"
|
||||
operatorClientset "github.com/minio/operator/pkg/client/clientset/versioned"
|
||||
)
|
||||
|
||||
|
||||
@@ -21,7 +21,7 @@ package versioned
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
networkingv1beta2 "github.com/minio/mcs/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
networkingv1beta2 "github.com/minio/console/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
discovery "k8s.io/client-go/discovery"
|
||||
rest "k8s.io/client-go/rest"
|
||||
flowcontrol "k8s.io/client-go/util/flowcontrol"
|
||||
|
||||
@@ -19,9 +19,9 @@ limitations under the License.
|
||||
package fake
|
||||
|
||||
import (
|
||||
clientset "github.com/minio/mcs/pkg/clientgen/clientset/versioned"
|
||||
networkingv1beta2 "github.com/minio/mcs/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
fakenetworkingv1beta2 "github.com/minio/mcs/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2/fake"
|
||||
clientset "github.com/minio/console/pkg/clientgen/clientset/versioned"
|
||||
networkingv1beta2 "github.com/minio/console/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
fakenetworkingv1beta2 "github.com/minio/console/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2/fake"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/watch"
|
||||
"k8s.io/client-go/discovery"
|
||||
|
||||
@@ -19,7 +19,7 @@ limitations under the License.
|
||||
package fake
|
||||
|
||||
import (
|
||||
networkingv1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
networkingv1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -19,7 +19,7 @@ limitations under the License.
|
||||
package scheme
|
||||
|
||||
import (
|
||||
networkingv1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
networkingv1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -21,7 +21,7 @@ package fake
|
||||
import (
|
||||
"context"
|
||||
|
||||
v1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -19,7 +19,7 @@ limitations under the License.
|
||||
package fake
|
||||
|
||||
import (
|
||||
v1beta2 "github.com/minio/mcs/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
v1beta2 "github.com/minio/console/pkg/clientgen/clientset/versioned/typed/networking.gke.io/v1beta2"
|
||||
rest "k8s.io/client-go/rest"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
@@ -22,8 +22,8 @@ import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
v1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
scheme "github.com/minio/mcs/pkg/clientgen/clientset/versioned/scheme"
|
||||
v1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
scheme "github.com/minio/console/pkg/clientgen/clientset/versioned/scheme"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
@@ -19,8 +19,8 @@ limitations under the License.
|
||||
package v1beta2
|
||||
|
||||
import (
|
||||
v1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
"github.com/minio/mcs/pkg/clientgen/clientset/versioned/scheme"
|
||||
v1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
"github.com/minio/console/pkg/clientgen/clientset/versioned/scheme"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
|
||||
@@ -23,9 +23,9 @@ import (
|
||||
sync "sync"
|
||||
time "time"
|
||||
|
||||
versioned "github.com/minio/mcs/pkg/clientgen/clientset/versioned"
|
||||
internalinterfaces "github.com/minio/mcs/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
networkinggkeio "github.com/minio/mcs/pkg/clientgen/informers/externalversions/networking.gke.io"
|
||||
versioned "github.com/minio/console/pkg/clientgen/clientset/versioned"
|
||||
internalinterfaces "github.com/minio/console/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
networkinggkeio "github.com/minio/console/pkg/clientgen/informers/externalversions/networking.gke.io"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
|
||||
@@ -21,7 +21,7 @@ package externalversions
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
v1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
@@ -21,7 +21,7 @@ package internalinterfaces
|
||||
import (
|
||||
time "time"
|
||||
|
||||
versioned "github.com/minio/mcs/pkg/clientgen/clientset/versioned"
|
||||
versioned "github.com/minio/console/pkg/clientgen/clientset/versioned"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
|
||||
@@ -19,8 +19,8 @@ limitations under the License.
|
||||
package networking
|
||||
|
||||
import (
|
||||
internalinterfaces "github.com/minio/mcs/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
v1beta2 "github.com/minio/mcs/pkg/clientgen/informers/externalversions/networking.gke.io/v1beta2"
|
||||
internalinterfaces "github.com/minio/console/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
v1beta2 "github.com/minio/console/pkg/clientgen/informers/externalversions/networking.gke.io/v1beta2"
|
||||
)
|
||||
|
||||
// Interface provides access to each of this group's versions.
|
||||
|
||||
@@ -19,7 +19,7 @@ limitations under the License.
|
||||
package v1beta2
|
||||
|
||||
import (
|
||||
internalinterfaces "github.com/minio/mcs/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
internalinterfaces "github.com/minio/console/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
)
|
||||
|
||||
// Interface provides access to all the informers in this group version.
|
||||
|
||||
@@ -22,10 +22,10 @@ import (
|
||||
"context"
|
||||
time "time"
|
||||
|
||||
networkinggkeiov1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
versioned "github.com/minio/mcs/pkg/clientgen/clientset/versioned"
|
||||
internalinterfaces "github.com/minio/mcs/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
v1beta2 "github.com/minio/mcs/pkg/clientgen/listers/networking.gke.io/v1beta2"
|
||||
networkinggkeiov1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
versioned "github.com/minio/console/pkg/clientgen/clientset/versioned"
|
||||
internalinterfaces "github.com/minio/console/pkg/clientgen/informers/externalversions/internalinterfaces"
|
||||
v1beta2 "github.com/minio/console/pkg/clientgen/listers/networking.gke.io/v1beta2"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
|
||||
@@ -19,7 +19,7 @@ limitations under the License.
|
||||
package v1beta2
|
||||
|
||||
import (
|
||||
v1beta2 "github.com/minio/mcs/pkg/apis/networking.gke.io/v1beta2"
|
||||
v1beta2 "github.com/minio/console/pkg/apis/networking.gke.io/v1beta2"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -110,7 +110,7 @@ const AddTenant = ({
|
||||
const [mountPath, setMountPath] = useState<string>("");
|
||||
const [accessKey, setAccessKey] = useState<string>("");
|
||||
const [secretKey, setSecretKey] = useState<string>("");
|
||||
const [enableMCS, setEnableMCS] = useState<boolean>(true);
|
||||
const [enableConsole, setEnableConsole] = useState<boolean>(true);
|
||||
const [enableSSL, setEnableSSL] = useState<boolean>(false);
|
||||
const [sizeFactor, setSizeFactor] = useState<string>("Gi");
|
||||
const [storageClasses, setStorageClassesList] = useState<Opts[]>([]);
|
||||
@@ -275,7 +275,7 @@ const AddTenant = ({
|
||||
service_name: tenantName,
|
||||
image: imageName,
|
||||
enable_ssl: enableSSL,
|
||||
enable_mcs: enableMCS,
|
||||
enable_console: enableConsole,
|
||||
access_key: accessKey,
|
||||
secret_key: secretKey,
|
||||
volumes_per_server: volumesPerServer,
|
||||
@@ -735,15 +735,15 @@ const AddTenant = ({
|
||||
</div>
|
||||
<Grid item xs={12}>
|
||||
<CheckboxWrapper
|
||||
value="enabled_mcs"
|
||||
id="enabled_mcs"
|
||||
name="enabled_mcs"
|
||||
checked={enableMCS}
|
||||
value="enabled_console"
|
||||
id="enabled_console"
|
||||
name="enabled_console"
|
||||
checked={enableConsole}
|
||||
onChange={(e) => {
|
||||
const targetD = e.target;
|
||||
const checked = targetD.checked;
|
||||
|
||||
setEnableMCS(checked);
|
||||
setEnableConsole(checked);
|
||||
}}
|
||||
label={"Enable Console"}
|
||||
/>
|
||||
@@ -888,9 +888,11 @@ const AddTenant = ({
|
||||
</TableRow>
|
||||
<TableRow>
|
||||
<TableCell align="right" className={classes.tableTitle}>
|
||||
Enable MCS
|
||||
Enable Console
|
||||
</TableCell>
|
||||
<TableCell>
|
||||
{enableConsole ? "Enabled" : "Disabled"}
|
||||
</TableCell>
|
||||
<TableCell>{enableMCS ? "Enabled" : "Disabled"}</TableCell>
|
||||
</TableRow>
|
||||
</React.Fragment>
|
||||
)}
|
||||
|
||||
@@ -23,12 +23,12 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func registerAdminArnsHandlers(api *operations.McsAPI) {
|
||||
func registerAdminArnsHandlers(api *operations.ConsoleAPI) {
|
||||
// return a list of arns
|
||||
api.AdminAPIArnListHandler = admin_api.ArnListHandlerFunc(func(params admin_api.ArnListParams, session *models.Principal) middleware.Responder {
|
||||
arnsResp, err := getArnsResponse(session)
|
||||
|
||||
@@ -24,14 +24,14 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
madmin "github.com/minio/minio/pkg/madmin"
|
||||
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func registerConfigHandlers(api *operations.McsAPI) {
|
||||
func registerConfigHandlers(api *operations.ConsoleAPI) {
|
||||
// List Configurations
|
||||
api.AdminAPIListConfigHandler = admin_api.ListConfigHandlerFunc(func(params admin_api.ListConfigParams, session *models.Principal) middleware.Responder {
|
||||
configListResp, err := getListConfigResponse(session)
|
||||
|
||||
@@ -28,7 +28,7 @@ import (
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/minio/cmd/config"
|
||||
"github.com/minio/minio/pkg/event/target"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
|
||||
@@ -23,15 +23,15 @@ import (
|
||||
"github.com/go-openapi/errors"
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/console/models"
|
||||
)
|
||||
|
||||
func registerGroupsHandlers(api *operations.McsAPI) {
|
||||
func registerGroupsHandlers(api *operations.ConsoleAPI) {
|
||||
// List Groups
|
||||
api.AdminAPIListGroupsHandler = admin_api.ListGroupsHandlerFunc(func(params admin_api.ListGroupsParams, session *models.Principal) middleware.Responder {
|
||||
listGroupsResponse, err := getListGroupsResponse(session)
|
||||
|
||||
@@ -22,7 +22,7 @@ import (
|
||||
"testing"
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
|
||||
@@ -23,12 +23,12 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func registerAdminInfoHandlers(api *operations.McsAPI) {
|
||||
func registerAdminInfoHandlers(api *operations.ConsoleAPI) {
|
||||
// return usage stats
|
||||
api.AdminAPIAdminInfoHandler = admin_api.AdminInfoHandlerFunc(func(params admin_api.AdminInfoParams, session *models.Principal) middleware.Responder {
|
||||
infoResp, err := getAdminInfoResponse(session)
|
||||
|
||||
@@ -24,12 +24,12 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func registerAdminNotificationEndpointsHandlers(api *operations.McsAPI) {
|
||||
func registerAdminNotificationEndpointsHandlers(api *operations.ConsoleAPI) {
|
||||
// return a list of notification endpoints
|
||||
api.AdminAPINotificationEndpointListHandler = admin_api.NotificationEndpointListHandlerFunc(func(params admin_api.NotificationEndpointListParams, session *models.Principal) middleware.Responder {
|
||||
notifEndpoints, err := getNotificationEndpointsResponse(session)
|
||||
|
||||
@@ -24,8 +24,8 @@ import (
|
||||
|
||||
"github.com/go-openapi/swag"
|
||||
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func Test_addNotificationEndpoint(t *testing.T) {
|
||||
|
||||
@@ -26,13 +26,13 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
)
|
||||
|
||||
func registersPoliciesHandler(api *operations.McsAPI) {
|
||||
func registersPoliciesHandler(api *operations.ConsoleAPI) {
|
||||
// List Policies
|
||||
api.AdminAPIListPoliciesHandler = admin_api.ListPoliciesHandlerFunc(func(params admin_api.ListPoliciesParams, session *models.Principal) middleware.Responder {
|
||||
listPoliciesResponse, err := getListPoliciesResponse(session)
|
||||
|
||||
@@ -25,7 +25,7 @@ import (
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/console/models"
|
||||
iampolicy "github.com/minio/minio/pkg/iam/policy"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
@@ -97,7 +97,7 @@ func TestRemovePolicy(t *testing.T) {
|
||||
funcAssert := assert.New(t)
|
||||
adminClient := adminClientMock{}
|
||||
// Test-1 : removePolicy() remove an existing policy
|
||||
policyToRemove := "mcs-policy"
|
||||
policyToRemove := "console-policy"
|
||||
minioRemovePolicyMock = func(name string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -26,13 +26,13 @@ import (
|
||||
"github.com/go-openapi/runtime"
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
)
|
||||
|
||||
func registerProfilingHandler(api *operations.McsAPI) {
|
||||
func registerProfilingHandler(api *operations.ConsoleAPI) {
|
||||
// Start Profiling
|
||||
api.AdminAPIProfilingStartHandler = admin_api.ProfilingStartHandlerFunc(func(params admin_api.ProfilingStartParams, session *models.Principal) middleware.Responder {
|
||||
profilingStartResponse, err := getProfilingStartResponse(session, params.Body)
|
||||
|
||||
@@ -24,7 +24,7 @@ import (
|
||||
|
||||
"errors"
|
||||
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/minio/pkg/madmin"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
@@ -23,13 +23,13 @@ import (
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
)
|
||||
|
||||
func registerServiceHandlers(api *operations.McsAPI) {
|
||||
func registerServiceHandlers(api *operations.ConsoleAPI) {
|
||||
// Restart Service
|
||||
api.AdminAPIRestartServiceHandler = admin_api.RestartServiceHandlerFunc(func(params admin_api.RestartServiceParams, session *models.Principal) middleware.Responder {
|
||||
if err := getRestartServiceResponse(session); err != nil {
|
||||
|
||||
@@ -19,6 +19,7 @@ package restapi
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
@@ -33,19 +34,19 @@ import (
|
||||
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
|
||||
"github.com/minio/mcs/cluster"
|
||||
"github.com/minio/console/cluster"
|
||||
madmin "github.com/minio/minio/pkg/madmin"
|
||||
|
||||
"github.com/go-openapi/runtime/middleware"
|
||||
"github.com/go-openapi/swag"
|
||||
"github.com/minio/mcs/models"
|
||||
"github.com/minio/mcs/restapi/operations"
|
||||
"github.com/minio/mcs/restapi/operations/admin_api"
|
||||
"github.com/minio/console/models"
|
||||
"github.com/minio/console/restapi/operations"
|
||||
"github.com/minio/console/restapi/operations/admin_api"
|
||||
operator "github.com/minio/operator/pkg/apis/minio.min.io/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func registerTenantHandlers(api *operations.McsAPI) {
|
||||
func registerTenantHandlers(api *operations.ConsoleAPI) {
|
||||
// Add Tenant
|
||||
api.AdminAPICreateTenantHandler = admin_api.CreateTenantHandlerFunc(func(params admin_api.CreateTenantParams, session *models.Principal) middleware.Responder {
|
||||
resp, err := getTenantCreatedResponse(session, params)
|
||||
@@ -107,10 +108,19 @@ func registerTenantHandlers(api *operations.McsAPI) {
|
||||
err := getTenantAddZoneResponse(session, params)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return admin_api.NewTenantAddZoneDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Unable to update tenant")})
|
||||
return admin_api.NewTenantAddZoneDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Unable to add zone")})
|
||||
}
|
||||
return admin_api.NewTenantAddZoneCreated()
|
||||
})
|
||||
|
||||
api.AdminAPIGetTenantUsageHandler = admin_api.GetTenantUsageHandlerFunc(func(params admin_api.GetTenantUsageParams, session *models.Principal) middleware.Responder {
|
||||
payload, err := getTenantUsageResponse(session, params)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
return admin_api.NewGetTenantUsageDefault(500).WithPayload(&models.Error{Code: 500, Message: swag.String("Unable to get tenant usage")})
|
||||
}
|
||||
return admin_api.NewGetTenantUsageOK().WithPayload(payload)
|
||||
})
|
||||
}
|
||||
|
||||
// deleteTenantAction performs the actions of deleting a tenant
|
||||
@@ -177,7 +187,7 @@ func getTenant(ctx context.Context, operatorClient OperatorClient, namespace, te
|
||||
return minInst, nil
|
||||
}
|
||||
|
||||
func getTenantInfo(tenant *operator.Tenant, tenantInfo *usageInfo) *models.Tenant {
|
||||
func getTenantInfo(tenant *operator.Tenant) *models.Tenant {
|
||||
var instanceCount int64
|
||||
var volumeCount int64
|
||||
for _, zone := range tenant.Spec.Zones {
|
||||
@@ -189,21 +199,7 @@ func getTenantInfo(tenant *operator.Tenant, tenantInfo *usageInfo) *models.Tenan
|
||||
|
||||
var totalSize int64
|
||||
for _, z := range tenant.Spec.Zones {
|
||||
zoneModel := &models.Zone{
|
||||
Name: z.Name,
|
||||
Servers: swag.Int64(int64(z.Servers)),
|
||||
VolumesPerServer: &z.VolumesPerServer,
|
||||
VolumeConfiguration: &models.ZoneVolumeConfiguration{},
|
||||
}
|
||||
|
||||
if z.VolumeClaimTemplate != nil {
|
||||
zoneModel.VolumeConfiguration.Size = swag.Int64(z.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value())
|
||||
if z.VolumeClaimTemplate.Spec.StorageClassName != nil {
|
||||
zoneModel.VolumeConfiguration.StorageClassName = *z.VolumeClaimTemplate.Spec.StorageClassName
|
||||
}
|
||||
}
|
||||
|
||||
zones = append(zones, zoneModel)
|
||||
zones = append(zones, parseTenantZone(&z))
|
||||
zoneSize := int64(z.Servers) * int64(z.VolumesPerServer) * z.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()
|
||||
totalSize = totalSize + zoneSize
|
||||
}
|
||||
@@ -216,7 +212,6 @@ func getTenantInfo(tenant *operator.Tenant, tenantInfo *usageInfo) *models.Tenan
|
||||
Zones: zones,
|
||||
Namespace: tenant.ObjectMeta.Namespace,
|
||||
Image: tenant.Spec.Image,
|
||||
UsedSize: tenantInfo.DisksUsage,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -229,55 +224,18 @@ func getTenantInfoResponse(session *models.Principal, params admin_api.TenantInf
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientset, err := cluster.K8sClient(session.SessionToken)
|
||||
if err != nil {
|
||||
log.Println("error getting k8sClient:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opClient := &operatorClient{
|
||||
client: opClientClientSet,
|
||||
}
|
||||
k8sClient := &k8sClient{
|
||||
client: clientset,
|
||||
}
|
||||
|
||||
minTenant, err := getTenant(ctx, opClient, params.Namespace, params.Tenant)
|
||||
if err != nil {
|
||||
log.Println("error getting minioTenant:", err)
|
||||
return nil, err
|
||||
}
|
||||
tenantScheme := getTenantScheme(minTenant)
|
||||
|
||||
svcName := minTenant.Spec.ServiceName
|
||||
if svcName == "" {
|
||||
svcName = minTenant.Name
|
||||
// TODO:
|
||||
// 1 get tenant services
|
||||
// 2 filter out cluster ip svc
|
||||
}
|
||||
|
||||
mAdmin, err := getTenantAdminClient(
|
||||
ctx,
|
||||
k8sClient,
|
||||
params.Namespace,
|
||||
params.Tenant,
|
||||
svcName,
|
||||
tenantScheme)
|
||||
if err != nil {
|
||||
log.Println("error getting tenant's admin client:", err)
|
||||
return nil, err
|
||||
}
|
||||
// create a minioClient interface implementation
|
||||
// defining the client to be used
|
||||
adminClient := adminClient{client: mAdmin}
|
||||
// serialize output
|
||||
adminInfo, err := getAdminInfo(ctx, adminClient)
|
||||
if err != nil {
|
||||
log.Println("error getting admin info:", err)
|
||||
return nil, err
|
||||
}
|
||||
info := getTenantInfo(minTenant, adminInfo)
|
||||
info := getTenantInfo(minTenant)
|
||||
return info, nil
|
||||
}
|
||||
|
||||
@@ -297,29 +255,34 @@ func listTenants(ctx context.Context, operatorClient OperatorClient, namespace s
|
||||
|
||||
var tenants []*models.TenantList
|
||||
|
||||
for _, minInst := range minTenants.Items {
|
||||
|
||||
for _, tenant := range minTenants.Items {
|
||||
var totalSize int64
|
||||
var instanceCount int64
|
||||
var volumeCount int64
|
||||
for _, zone := range minInst.Spec.Zones {
|
||||
for _, zone := range tenant.Spec.Zones {
|
||||
instanceCount = instanceCount + int64(zone.Servers)
|
||||
volumeCount = volumeCount + int64(zone.Servers*zone.VolumesPerServer)
|
||||
if zone.VolumeClaimTemplate != nil {
|
||||
zoneSize := int64(zone.VolumesPerServer) * int64(zone.Servers) * zone.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()
|
||||
totalSize = totalSize + zoneSize
|
||||
}
|
||||
}
|
||||
|
||||
tenants = append(tenants, &models.TenantList{
|
||||
CreationDate: minInst.ObjectMeta.CreationTimestamp.String(),
|
||||
Name: minInst.ObjectMeta.Name,
|
||||
ZoneCount: int64(len(minInst.Spec.Zones)),
|
||||
CreationDate: tenant.ObjectMeta.CreationTimestamp.String(),
|
||||
Name: tenant.ObjectMeta.Name,
|
||||
ZoneCount: int64(len(tenant.Spec.Zones)),
|
||||
InstanceCount: instanceCount,
|
||||
VolumeCount: volumeCount,
|
||||
CurrentState: minInst.Status.CurrentState,
|
||||
Namespace: minInst.ObjectMeta.Namespace,
|
||||
CurrentState: tenant.Status.CurrentState,
|
||||
Namespace: tenant.ObjectMeta.Namespace,
|
||||
TotalSize: totalSize,
|
||||
})
|
||||
}
|
||||
|
||||
return &models.ListTenantsResponse{
|
||||
Tenants: tenants,
|
||||
Total: 0,
|
||||
Total: int64(len(tenants)),
|
||||
}, nil
|
||||
}
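As a worked example of the size accounting in the loop above (numbers are illustrative, not from the source): a zone with 4 servers, 4 volumes per server and a 1 TiB volume claim contributes 16 TiB to TotalSize.

```go
// Illustrative numbers only: one zone with 4 servers, 4 volumes per server and
// a 1 TiB volume claim, using the same formula as the loop above
// (servers * volumesPerServer * volume size).
package main

import "fmt"

func main() {
	servers, volumesPerServer := int64(4), int64(4)
	volumeSize := int64(1) << 40 // 1 TiB in bytes
	zoneSize := servers * volumesPerServer * volumeSize
	fmt.Println(zoneSize) // 17592186044416 bytes, i.e. 16 TiB added to TotalSize
}
```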
@@ -369,6 +332,13 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
}
|
||||
minioImage = *minImg
|
||||
}
|
||||
// get Kubernetes Client
|
||||
clientset, err := cluster.K8sClient(session.SessionToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ns := *params.Body.Namespace
|
||||
|
||||
// if access/secret are provided, use them, else create a random pair
|
||||
accessKey := RandomCharString(16)
|
||||
@@ -392,11 +362,6 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
},
|
||||
}
|
||||
|
||||
clientset, err := cluster.K8sClient(session.SessionToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
ns := *params.Body.Namespace
|
||||
_, err = clientset.CoreV1().Secrets(ns).Create(context.Background(), &instanceSecret, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -406,15 +371,9 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
if params.Body.EnableSsl != nil {
|
||||
enableSSL = *params.Body.EnableSsl
|
||||
}
|
||||
enableMCS := true
|
||||
enableConsole := true
|
||||
if params.Body.EnableConsole != nil {
|
||||
enableMCS = *params.Body.EnableConsole
|
||||
}
|
||||
|
||||
// TODO: Calculate this ourselves?
|
||||
memorySize, err := resource.ParseQuantity(getTenantMemorySize())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
enableConsole = *params.Body.EnableConsole
|
||||
}
|
||||
|
||||
//Construct a MinIO Instance with everything we are getting from parameters
|
||||
@@ -432,23 +391,25 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
},
|
||||
}
|
||||
// optionals are set below
|
||||
|
||||
if enableMCS {
|
||||
mcsSelector := fmt.Sprintf("%s-mcs", *params.Body.Name)
|
||||
|
||||
mcsSecretName := fmt.Sprintf("%s-secret", mcsSelector)
|
||||
var consoleAccess string
|
||||
var consoleSecret string
|
||||
if enableConsole {
|
||||
consoleSelector := fmt.Sprintf("%s-console", *params.Body.Name)
|
||||
consoleSecretName := fmt.Sprintf("%s-secret", consoleSelector)
|
||||
consoleAccess = RandomCharString(16)
|
||||
consoleSecret = RandomCharString(32)
|
||||
imm := true
|
||||
instanceSecret := corev1.Secret{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: mcsSecretName,
|
||||
Name: consoleSecretName,
|
||||
},
|
||||
Immutable: &imm,
|
||||
Data: map[string][]byte{
|
||||
"MCS_HMAC_JWT_SECRET": []byte(RandomCharString(16)),
|
||||
"MCS_PBKDF_PASSPHRASE": []byte(RandomCharString(16)),
|
||||
"MCS_PBKDF_SALT": []byte(RandomCharString(8)),
|
||||
"MCS_ACCESS_KEY": []byte(RandomCharString(16)),
|
||||
"MCS_SECRET_KEY": []byte(RandomCharString(32)),
|
||||
"CONSOLE_HMAC_JWT_SECRET": []byte(RandomCharString(16)),
|
||||
"CONSOLE_PBKDF_PASSPHRASE": []byte(RandomCharString(16)),
|
||||
"CONSOLE_PBKDF_SALT": []byte(RandomCharString(8)),
|
||||
"CONSOLE_ACCESS_KEY": []byte(consoleAccess),
|
||||
"CONSOLE_SECRET_KEY": []byte(consoleSecret),
|
||||
},
|
||||
}
|
||||
_, err = clientset.CoreV1().Secrets(ns).Create(context.Background(), &instanceSecret, metav1.CreateOptions{})
|
||||
@@ -456,11 +417,11 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
return nil, err
|
||||
}
|
||||
|
||||
const consoleVersion = "minio/mcs:v0.2.1"
|
||||
const consoleVersion = "minio/console:v0.3.3"
|
||||
minInst.Spec.Console = &operator.ConsoleConfiguration{
|
||||
Replicas: 2,
|
||||
Image: consoleVersion,
|
||||
ConsoleSecret: &corev1.LocalObjectReference{Name: mcsSecretName},
|
||||
ConsoleSecret: &corev1.LocalObjectReference{Name: consoleSecretName},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -469,39 +430,12 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
minInst.Spec.ServiceName = params.Body.ServiceName
|
||||
}
|
||||
// set the zones if they are provided
|
||||
if len(params.Body.Zones) > 0 {
|
||||
for _, zone := range params.Body.Zones {
|
||||
volumeSize := resource.NewQuantity(*zone.VolumeConfiguration.Size, resource.DecimalExponent)
|
||||
volTemp := corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: *volumeSize,
|
||||
},
|
||||
},
|
||||
}
|
||||
if zone.VolumeConfiguration.StorageClassName != "" {
|
||||
volTemp.StorageClassName = &zone.VolumeConfiguration.StorageClassName
|
||||
}
|
||||
minInst.Spec.Zones = append(minInst.Spec.Zones, operator.Zone{
|
||||
Name: zone.Name,
|
||||
Servers: int32(*zone.Servers),
|
||||
VolumesPerServer: *zone.VolumesPerServer,
|
||||
VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "data",
|
||||
},
|
||||
Spec: volTemp,
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceMemory: memorySize,
|
||||
},
|
||||
},
|
||||
})
|
||||
for _, zone := range params.Body.Zones {
|
||||
zone, err := parseTenantZoneRequest(zone)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
minInst.Spec.Zones = append(minInst.Spec.Zones, *zone)
|
||||
}
|
||||
|
||||
// Set Mount Path if provided
|
||||
@@ -533,11 +467,17 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return &models.CreateTenantResponse{
|
||||
response := &models.CreateTenantResponse{
|
||||
AccessKey: accessKey,
|
||||
SecretKey: secretKey,
|
||||
}, nil
|
||||
}
|
||||
// Attach Console Credentials
|
||||
if enableConsole {
|
||||
response.Console = &models.CreateTenantResponseConsole{}
|
||||
response.Console.AccessKey = consoleAccess
|
||||
response.Console.SecretKey = consoleSecret
|
||||
}
|
||||
return response, nil
|
||||
}
|
||||
|
||||
// updateTenantAction does an update on the minioTenant by patching the desired changes
|
||||
@@ -595,22 +535,23 @@ func getUpdateTenantResponse(session *models.Principal, params admin_api.UpdateT
|
||||
|
||||
// addTenantZone creates a zone to a defined tenant
|
||||
func addTenantZone(ctx context.Context, operatorClient OperatorClient, params admin_api.TenantAddZoneParams) error {
|
||||
minInst, err := operatorClient.TenantGet(ctx, params.Namespace, params.Tenant, metav1.GetOptions{})
|
||||
tenant, err := operatorClient.TenantGet(ctx, params.Namespace, params.Tenant, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
minInst.Spec.Zones = append(minInst.Spec.Zones, operator.Zone{
|
||||
Name: params.Body.Name,
|
||||
Servers: int32(*params.Body.Servers),
|
||||
})
|
||||
|
||||
payloadBytes, err := json.Marshal(minInst)
|
||||
zoneParams := params.Body
|
||||
zone, err := parseTenantZoneRequest(zoneParams)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
tenant.Spec.Zones = append(tenant.Spec.Zones, *zone)
|
||||
payloadBytes, err := json.Marshal(tenant)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = operatorClient.TenantPatch(ctx, params.Namespace, minInst.Name, types.MergePatchType, payloadBytes, metav1.PatchOptions{})
|
||||
_, err = operatorClient.TenantPatch(ctx, params.Namespace, tenant.Name, types.MergePatchType, payloadBytes, metav1.PatchOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -633,3 +574,461 @@ func getTenantAddZoneResponse(session *models.Principal, params admin_api.Tenant
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// getTenantUsageResponse returns the usage of a tenant
|
||||
func getTenantUsageResponse(session *models.Principal, params admin_api.GetTenantUsageParams) (*models.TenantUsage, error) {
|
||||
// 5 seconds timeout
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer cancel()
|
||||
|
||||
opClientClientSet, err := cluster.OperatorClient(session.SessionToken)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
clientset, err := cluster.K8sClient(session.SessionToken)
|
||||
if err != nil {
|
||||
log.Println("error getting k8sClient:", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
opClient := &operatorClient{
|
||||
client: opClientClientSet,
|
||||
}
|
||||
k8sClient := &k8sClient{
|
||||
client: clientset,
|
||||
}
|
||||
|
||||
minTenant, err := getTenant(ctx, opClient, params.Namespace, params.Tenant)
|
||||
if err != nil {
|
||||
log.Println("error getting minioTenant:", err)
|
||||
return nil, err
|
||||
}
|
||||
tenantScheme := getTenantScheme(minTenant)
|
||||
|
||||
svcName := minTenant.Spec.ServiceName
|
||||
if svcName == "" {
|
||||
svcName = minTenant.Name
|
||||
// TODO:
|
||||
// 1 get tenant services
|
||||
// 2 filter out cluster ip svc
|
||||
}
|
||||
|
||||
mAdmin, err := getTenantAdminClient(
|
||||
ctx,
|
||||
k8sClient,
|
||||
params.Namespace,
|
||||
params.Tenant,
|
||||
svcName,
|
||||
tenantScheme)
|
||||
if err != nil {
|
||||
log.Println("error getting tenant's admin client:", err)
|
||||
return nil, err
|
||||
}
|
||||
// create a minioClient interface implementation
|
||||
// defining the client to be used
|
||||
adminClient := adminClient{client: mAdmin}
|
||||
// serialize output
|
||||
adminInfo, err := getAdminInfo(ctx, adminClient)
|
||||
if err != nil {
|
||||
log.Println("error getting admin info:", err)
|
||||
return nil, err
|
||||
}
|
||||
info := &models.TenantUsage{UsedSize: adminInfo.Usage}
|
||||
return info, nil
|
||||
}
|
||||
|
||||
// parseTenantZoneRequest parse zone request and returns the equivalent
|
||||
// operator.Zone object
|
||||
func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) {
|
||||
if zoneParams.VolumeConfiguration == nil {
|
||||
return nil, errors.New("a volume configuration must be specified")
|
||||
}
|
||||
|
||||
if zoneParams.VolumeConfiguration.Size == nil || *zoneParams.VolumeConfiguration.Size <= int64(0) {
|
||||
return nil, errors.New("volume size must be greater than 0")
|
||||
}
|
||||
|
||||
if zoneParams.Servers == nil || *zoneParams.Servers <= 0 {
|
||||
return nil, errors.New("number of servers must be greater than 0")
|
||||
}
|
||||
|
||||
if zoneParams.VolumesPerServer == nil || *zoneParams.VolumesPerServer <= 0 {
|
||||
return nil, errors.New("number of volumes per server must be greater than 0")
|
||||
}
|
||||
|
||||
volumeSize := resource.NewQuantity(*zoneParams.VolumeConfiguration.Size, resource.DecimalExponent)
|
||||
volTemp := corev1.PersistentVolumeClaimSpec{
|
||||
AccessModes: []corev1.PersistentVolumeAccessMode{
|
||||
corev1.ReadWriteOnce,
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: corev1.ResourceList{
|
||||
corev1.ResourceStorage: *volumeSize,
|
||||
},
|
||||
},
|
||||
}
|
||||
if zoneParams.VolumeConfiguration.StorageClassName != "" {
|
||||
volTemp.StorageClassName = &zoneParams.VolumeConfiguration.StorageClassName
|
||||
}
|
||||
|
||||
// parse resources' requests
|
||||
var resourcesRequests corev1.ResourceList
|
||||
var resourcesLimits corev1.ResourceList
|
||||
if zoneParams.Resources != nil {
|
||||
for key, val := range zoneParams.Resources.Requests {
|
||||
resourcesRequests[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI)
|
||||
}
|
||||
for key, val := range zoneParams.Resources.Limits {
|
||||
resourcesLimits[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI)
|
||||
}
|
||||
}
|
||||
|
||||
// parse Node Affinity
|
||||
nodeSelectorTerms := []corev1.NodeSelectorTerm{}
|
||||
preferredSchedulingTerm := []corev1.PreferredSchedulingTerm{}
|
||||
if zoneParams.Affinity != nil && zoneParams.Affinity.NodeAffinity != nil {
|
||||
if zoneParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
|
||||
for _, elem := range zoneParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
|
||||
term := parseModelsNodeSelectorTerm(elem)
|
||||
nodeSelectorTerms = append(nodeSelectorTerms, term)
|
||||
}
|
||||
}
|
||||
for _, elem := range zoneParams.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
pst := corev1.PreferredSchedulingTerm{
|
||||
Weight: *elem.Weight,
|
||||
Preference: parseModelsNodeSelectorTerm(elem.Preference),
|
||||
}
|
||||
preferredSchedulingTerm = append(preferredSchedulingTerm, pst)
|
||||
}
|
||||
}
|
||||
var nodeAffinity *corev1.NodeAffinity
|
||||
if len(nodeSelectorTerms) > 0 || len(preferredSchedulingTerm) > 0 {
|
||||
nodeAffinity = &corev1.NodeAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: &corev1.NodeSelector{
|
||||
NodeSelectorTerms: nodeSelectorTerms,
|
||||
},
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: preferredSchedulingTerm,
|
||||
}
|
||||
}
|
||||
|
||||
// parse Pod Affinity
|
||||
podAffinityTerms := []corev1.PodAffinityTerm{}
|
||||
weightedPodAffinityTerms := []corev1.WeightedPodAffinityTerm{}
|
||||
if zoneParams.Affinity != nil && zoneParams.Affinity.PodAffinity != nil {
|
||||
for _, elem := range zoneParams.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
|
||||
podAffinityTerms = append(podAffinityTerms, parseModelPodAffinityTerm(elem))
|
||||
}
|
||||
for _, elem := range zoneParams.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
wAffinityTerm := corev1.WeightedPodAffinityTerm{
|
||||
Weight: *elem.Weight,
|
||||
PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm),
|
||||
}
|
||||
weightedPodAffinityTerms = append(weightedPodAffinityTerms, wAffinityTerm)
|
||||
}
|
||||
}
|
||||
var podAffinity *corev1.PodAffinity
|
||||
if len(podAffinityTerms) > 0 || len(weightedPodAffinityTerms) > 0 {
|
||||
podAffinity = &corev1.PodAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms,
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAffinityTerms,
|
||||
}
|
||||
}
|
||||
|
||||
// parse Pod Anti Affinity
|
||||
podAntiAffinityTerms := []corev1.PodAffinityTerm{}
|
||||
weightedPodAntiAffinityTerms := []corev1.WeightedPodAffinityTerm{}
|
||||
if zoneParams.Affinity != nil && zoneParams.Affinity.PodAntiAffinity != nil {
|
||||
for _, elem := range zoneParams.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
|
||||
podAntiAffinityTerms = append(podAntiAffinityTerms, parseModelPodAffinityTerm(elem))
|
||||
}
|
||||
for _, elem := range zoneParams.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
|
||||
wAffinityTerm := corev1.WeightedPodAffinityTerm{
|
||||
Weight: *elem.Weight,
|
||||
PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm),
|
||||
}
|
||||
weightedPodAntiAffinityTerms = append(weightedPodAntiAffinityTerms, wAffinityTerm)
|
||||
}
|
||||
}
|
||||
var podAntiAffinity *corev1.PodAntiAffinity
|
||||
if len(podAntiAffinityTerms) > 0 || len(weightedPodAntiAffinityTerms) > 0 {
|
||||
podAntiAffinity = &corev1.PodAntiAffinity{
|
||||
RequiredDuringSchedulingIgnoredDuringExecution: podAntiAffinityTerms,
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAntiAffinityTerms,
|
||||
}
|
||||
}
|
||||
|
||||
var affinity *corev1.Affinity
|
||||
if nodeAffinity != nil || podAffinity != nil || podAntiAffinity != nil {
|
||||
affinity = &corev1.Affinity{
|
||||
NodeAffinity: nodeAffinity,
|
||||
PodAffinity: podAffinity,
|
||||
PodAntiAffinity: podAntiAffinity,
|
||||
}
|
||||
}
|
||||
|
||||
// parse tolerations
|
||||
tolerations := []corev1.Toleration{}
|
||||
for _, elem := range zoneParams.Tolerations {
|
||||
toleration := corev1.Toleration{
|
||||
Key: elem.Key,
|
||||
Operator: corev1.TolerationOperator(elem.Operator),
|
||||
Value: elem.Value,
|
||||
Effect: corev1.TaintEffect(elem.Effect),
|
||||
TolerationSeconds: &elem.TolerationSeconds,
|
||||
}
|
||||
tolerations = append(tolerations, toleration)
|
||||
}
|
||||
|
||||
zone := &operator.Zone{
|
||||
Name: zoneParams.Name,
|
||||
Servers: int32(*zoneParams.Servers),
|
||||
VolumesPerServer: *zoneParams.VolumesPerServer,
|
||||
VolumeClaimTemplate: &corev1.PersistentVolumeClaim{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "data",
|
||||
},
|
||||
Spec: volTemp,
|
||||
},
|
||||
Resources: corev1.ResourceRequirements{
|
||||
Requests: resourcesRequests,
|
||||
Limits: resourcesLimits,
|
||||
},
|
||||
NodeSelector: zoneParams.NodeSelector,
|
||||
Affinity: affinity,
|
||||
Tolerations: tolerations,
|
||||
}
|
||||
return zone, nil
|
||||
}
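A hypothetical caller of parseTenantZoneRequest, built only from the fields visible in this diff (Name, Servers, VolumesPerServer and VolumeConfiguration); since models is swagger-generated, this is a sketch under those assumptions rather than a runnable test.

```go
// Hypothetical caller, using only fields visible in this diff; models is the
// swagger-generated package, so treat this as a sketch rather than a drop-in test.
package restapi

import (
	"log"

	"github.com/go-openapi/swag"
	"github.com/minio/console/models"
)

func exampleZoneRequest() {
	zoneReq := &models.Zone{
		Name:             "zone-0",
		Servers:          swag.Int64(4),
		VolumesPerServer: swag.Int32(4),
		VolumeConfiguration: &models.ZoneVolumeConfiguration{
			Size: swag.Int64(1 << 30), // 1 GiB per volume
		},
	}
	zone, err := parseTenantZoneRequest(zoneReq)
	if err != nil {
		log.Fatal(err) // e.g. "volume size must be greater than 0" when Size is missing
	}
	_ = zone // zone.VolumeClaimTemplate now carries the 1 GiB storage request
}
```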
func parseModelPodAffinityTerm(term *models.PodAffinityTerm) corev1.PodAffinityTerm {
|
||||
labelMatchExpressions := []metav1.LabelSelectorRequirement{}
|
||||
for _, exp := range term.LabelSelector.MatchExpressions {
|
||||
labelSelectorReq := metav1.LabelSelectorRequirement{
|
||||
Key: *exp.Key,
|
||||
Operator: metav1.LabelSelectorOperator(*exp.Operator),
|
||||
Values: exp.Values,
|
||||
}
|
||||
labelMatchExpressions = append(labelMatchExpressions, labelSelectorReq)
|
||||
}
|
||||
|
||||
podAffinityTerm := corev1.PodAffinityTerm{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchExpressions: labelMatchExpressions,
|
||||
MatchLabels: term.LabelSelector.MatchLabels,
|
||||
},
|
||||
Namespaces: term.Namespaces,
|
||||
TopologyKey: *term.TopologyKey,
|
||||
}
|
||||
return podAffinityTerm
|
||||
}
|
||||
|
||||
func parseModelsNodeSelectorTerm(elem *models.NodeSelectorTerm) corev1.NodeSelectorTerm {
|
||||
var term corev1.NodeSelectorTerm
|
||||
for _, matchExpression := range elem.MatchExpressions {
|
||||
matchExp := corev1.NodeSelectorRequirement{
|
||||
Key: *matchExpression.Key,
|
||||
Operator: corev1.NodeSelectorOperator(*matchExpression.Operator),
|
||||
Values: matchExpression.Values,
|
||||
}
|
||||
term.MatchExpressions = append(term.MatchExpressions, matchExp)
|
||||
}
|
||||
for _, matchField := range elem.MatchFields {
|
||||
matchF := corev1.NodeSelectorRequirement{
|
||||
Key: *matchField.Key,
|
||||
Operator: corev1.NodeSelectorOperator(*matchField.Operator),
|
||||
Values: matchField.Values,
|
||||
}
|
||||
term.MatchFields = append(term.MatchFields, matchF)
|
||||
}
|
||||
return term
|
||||
}
|
||||
|
||||
// parseTenantZone operator Zone object and returns the equivalent
|
||||
// models.Zone object
|
||||
func parseTenantZone(zone *operator.Zone) *models.Zone {
|
||||
var size *int64
|
||||
var storageClassName string
|
||||
if zone.VolumeClaimTemplate != nil {
|
||||
size = swag.Int64(zone.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value())
|
||||
if zone.VolumeClaimTemplate.Spec.StorageClassName != nil {
|
||||
storageClassName = *zone.VolumeClaimTemplate.Spec.StorageClassName
|
||||
}
|
||||
}
|
||||
|
||||
// parse resources' requests
|
||||
var resources *models.ZoneResources
|
||||
resourcesRequests := make(map[string]int64)
|
||||
resourcesLimits := make(map[string]int64)
|
||||
for key, val := range zone.Resources.Requests {
|
||||
resourcesRequests[key.String()] = val.Value()
|
||||
}
|
||||
for key, val := range zone.Resources.Limits {
|
||||
resourcesLimits[key.String()] = val.Value()
|
||||
}
|
||||
if len(resourcesRequests) > 0 || len(resourcesLimits) > 0 {
|
||||
resources = &models.ZoneResources{
|
||||
Limits: resourcesLimits,
|
||||
			Requests: resourcesRequests,
		}
	}

	// parse Node Affinity
	nodeSelectorTerms := []*models.NodeSelectorTerm{}
	preferredSchedulingTerm := []*models.ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}

	if zone.Affinity != nil && zone.Affinity.NodeAffinity != nil {
		if zone.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil {
			for _, elem := range zone.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
				term := parseNodeSelectorTerm(&elem)
				nodeSelectorTerms = append(nodeSelectorTerms, term)
			}
		}
		for _, elem := range zone.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
			pst := &models.ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
				Weight:     swag.Int32(elem.Weight),
				Preference: parseNodeSelectorTerm(&elem.Preference),
			}
			preferredSchedulingTerm = append(preferredSchedulingTerm, pst)
		}
	}

	var nodeAffinity *models.ZoneAffinityNodeAffinity
	if len(nodeSelectorTerms) > 0 || len(preferredSchedulingTerm) > 0 {
		nodeAffinity = &models.ZoneAffinityNodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &models.ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution{
				NodeSelectorTerms: nodeSelectorTerms,
			},
			PreferredDuringSchedulingIgnoredDuringExecution: preferredSchedulingTerm,
		}
	}

	// parse Pod Affinity
	podAffinityTerms := []*models.PodAffinityTerm{}
	weightedPodAffinityTerms := []*models.ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}

	if zone.Affinity != nil && zone.Affinity.PodAffinity != nil {
		for _, elem := range zone.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
			podAffinityTerms = append(podAffinityTerms, parsePodAffinityTerm(&elem))
		}
		for _, elem := range zone.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
			wAffinityTerm := &models.ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
				Weight:          swag.Int32(elem.Weight),
				PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm),
			}
			weightedPodAffinityTerms = append(weightedPodAffinityTerms, wAffinityTerm)
		}
	}
	var podAffinity *models.ZoneAffinityPodAffinity
	if len(podAffinityTerms) > 0 || len(weightedPodAffinityTerms) > 0 {
		podAffinity = &models.ZoneAffinityPodAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution:  podAffinityTerms,
			PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAffinityTerms,
		}
	}

	// parse Pod Anti Affinity
	podAntiAffinityTerms := []*models.PodAffinityTerm{}
	weightedPodAntiAffinityTerms := []*models.ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{}

	if zone.Affinity != nil && zone.Affinity.PodAntiAffinity != nil {
		for _, elem := range zone.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution {
			podAntiAffinityTerms = append(podAntiAffinityTerms, parsePodAffinityTerm(&elem))
		}
		for _, elem := range zone.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution {
			wAffinityTerm := &models.ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{
				Weight:          swag.Int32(elem.Weight),
				PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm),
			}
			weightedPodAntiAffinityTerms = append(weightedPodAntiAffinityTerms, wAffinityTerm)
		}
	}

	var podAntiAffinity *models.ZoneAffinityPodAntiAffinity
	if len(podAntiAffinityTerms) > 0 || len(weightedPodAntiAffinityTerms) > 0 {
		podAntiAffinity = &models.ZoneAffinityPodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution:  podAntiAffinityTerms,
			PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAntiAffinityTerms,
		}
	}

	// build affinity object
	var affinity *models.ZoneAffinity
	if nodeAffinity != nil || podAffinity != nil || podAntiAffinity != nil {
		affinity = &models.ZoneAffinity{
			NodeAffinity:    nodeAffinity,
			PodAffinity:     podAffinity,
			PodAntiAffinity: podAntiAffinity,
		}
	}

	// parse tolerations
	var tolerations models.ZoneTolerations
	for _, elem := range zone.Tolerations {
		toleration := &models.ZoneTolerationsItems0{
			Key:               elem.Key,
			Operator:          string(elem.Operator),
			Value:             elem.Value,
			Effect:            string(elem.Effect),
			TolerationSeconds: *elem.TolerationSeconds,
		}
		tolerations = append(tolerations, toleration)
	}

	zoneModel := &models.Zone{
		Name:             zone.Name,
		Servers:          swag.Int64(int64(zone.Servers)),
		VolumesPerServer: swag.Int32(zone.VolumesPerServer),
		VolumeConfiguration: &models.ZoneVolumeConfiguration{
			Size:             size,
			StorageClassName: storageClassName,
		},
		NodeSelector: zone.NodeSelector,
		Resources:    resources,
		Affinity:     affinity,
		Tolerations:  tolerations,
	}
	return zoneModel
}

func parsePodAffinityTerm(term *corev1.PodAffinityTerm) *models.PodAffinityTerm {
	labelMatchExpressions := []*models.PodAffinityTermLabelSelectorMatchExpressionsItems0{}
	for _, exp := range term.LabelSelector.MatchExpressions {
		labelSelectorReq := &models.PodAffinityTermLabelSelectorMatchExpressionsItems0{
			Key:      swag.String(exp.Key),
			Operator: swag.String(string(exp.Operator)),
			Values:   exp.Values,
		}
		labelMatchExpressions = append(labelMatchExpressions, labelSelectorReq)
	}

	podAffinityTerm := &models.PodAffinityTerm{
		LabelSelector: &models.PodAffinityTermLabelSelector{
			MatchExpressions: labelMatchExpressions,
			MatchLabels:      term.LabelSelector.MatchLabels,
		},
		Namespaces:  term.Namespaces,
		TopologyKey: swag.String(term.TopologyKey),
	}
	return podAffinityTerm
}

func parseNodeSelectorTerm(term *corev1.NodeSelectorTerm) *models.NodeSelectorTerm {
	var t models.NodeSelectorTerm
	for _, matchExpression := range term.MatchExpressions {
		matchExp := &models.NodeSelectorTermMatchExpressionsItems0{
			Key:      swag.String(matchExpression.Key),
			Operator: swag.String(string(matchExpression.Operator)),
			Values:   matchExpression.Values,
		}
		t.MatchExpressions = append(t.MatchExpressions, matchExp)
	}
	for _, matchField := range term.MatchFields {
		matchF := &models.NodeSelectorTermMatchFieldsItems0{
			Key:      swag.String(matchField.Key),
			Operator: swag.String(string(matchField.Operator)),
			Values:   matchField.Values,
		}
		t.MatchFields = append(t.MatchFields, matchF)
	}
	return &t
}
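The helpers above mirror the Kubernetes affinity types (corev1.NodeSelectorTerm, corev1.PodAffinityTerm) into the generated swagger models, wrapping scalar fields in pointers via swag. As a rough sketch only (not part of this changeset), a test living in the same package as these helpers could exercise parseNodeSelectorTerm like this; the fixture values are made up:

func Test_parseNodeSelectorTerm_sketch(t *testing.T) {
	// Hypothetical fixture: one match expression pinning the node hostname.
	k8sTerm := corev1.NodeSelectorTerm{
		MatchExpressions: []corev1.NodeSelectorRequirement{
			{
				Key:      "kubernetes.io/hostname",
				Operator: corev1.NodeSelectorOpIn,
				Values:   []string{"node-1", "node-2"},
			},
		},
	}
	got := parseNodeSelectorTerm(&k8sTerm)
	// The swagger model stores Key/Operator as *string (swag.String above),
	// so the assertions dereference them.
	if len(got.MatchExpressions) != 1 ||
		*got.MatchExpressions[0].Key != "kubernetes.io/hostname" ||
		*got.MatchExpressions[0].Operator != string(corev1.NodeSelectorOpIn) {
		t.Errorf("unexpected node selector term model: %+v", got)
	}
}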
@@ -27,9 +27,9 @@ import (
	"testing"

	"github.com/go-openapi/swag"
	"github.com/minio/mcs/cluster"
	"github.com/minio/mcs/models"
	"github.com/minio/mcs/restapi/operations/admin_api"
	"github.com/minio/console/cluster"
	"github.com/minio/console/models"
	"github.com/minio/console/restapi/operations/admin_api"
	operator "github.com/minio/operator/pkg/apis/minio.min.io/v1"
	v1 "github.com/minio/operator/pkg/apis/minio.min.io/v1"
	corev1 "k8s.io/api/core/v1"
@@ -315,13 +315,12 @@ func Test_TenantInfo(t *testing.T) {
				},
				Namespace: "minio-ns",
				Image:     "minio/minio:RELEASE.2020-06-14T18-32-17Z",
				UsedSize:  int64(1024),
			},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got := getTenantInfo(tt.args.minioTenant, tt.args.tenantInfo)
			got := getTenantInfo(tt.args.minioTenant)
			if !reflect.DeepEqual(got, tt.want) {
				ji, _ := json.Marshal(got)
				vi, _ := json.Marshal(tt.want)
@@ -415,10 +414,92 @@ func Test_TenantAddZone(t *testing.T) {
					Body: &models.Zone{
						Name:    "zone-1",
						Servers: swag.Int64(int64(4)),
						VolumeConfiguration: &models.ZoneVolumeConfiguration{
							Size:             swag.Int64(2147483648),
							StorageClassName: "standard",
						},
						VolumesPerServer: swag.Int32(4),
					},
				},
			},
			wantErr: false,
		}, {
			name: "Add zone, error size",
			args: args{
				ctx:            context.Background(),
				operatorClient: opClient,
				nameSpace:      "default",
				mockTenantPatch: func(ctx context.Context, namespace string, tenantName string, pt types.PatchType, data []byte, options metav1.PatchOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				params: admin_api.TenantAddZoneParams{
					Body: &models.Zone{
						Name:    "zone-1",
						Servers: swag.Int64(int64(4)),
						VolumeConfiguration: &models.ZoneVolumeConfiguration{
							Size:             swag.Int64(0),
							StorageClassName: "standard",
						},
						VolumesPerServer: swag.Int32(4),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "Add zone, error servers negative",
			args: args{
				ctx:            context.Background(),
				operatorClient: opClient,
				nameSpace:      "default",
				mockTenantPatch: func(ctx context.Context, namespace string, tenantName string, pt types.PatchType, data []byte, options metav1.PatchOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				params: admin_api.TenantAddZoneParams{
					Body: &models.Zone{
						Name:    "zone-1",
						Servers: swag.Int64(int64(-1)),
						VolumeConfiguration: &models.ZoneVolumeConfiguration{
							Size:             swag.Int64(2147483648),
							StorageClassName: "standard",
						},
						VolumesPerServer: swag.Int32(4),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "Add zone, error volumes per server negative",
			args: args{
				ctx:            context.Background(),
				operatorClient: opClient,
				nameSpace:      "default",
				mockTenantPatch: func(ctx context.Context, namespace string, tenantName string, pt types.PatchType, data []byte, options metav1.PatchOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) {
					return &v1.Tenant{}, nil
				},
				params: admin_api.TenantAddZoneParams{
					Body: &models.Zone{
						Name:    "zone-1",
						Servers: swag.Int64(int64(4)),
						VolumeConfiguration: &models.ZoneVolumeConfiguration{
							Size:             swag.Int64(2147483648),
							StorageClassName: "standard",
						},
						VolumesPerServer: swag.Int32(-1),
					},
				},
			},
			wantErr: true,
		},
		{
			name: "Error on patch, handle error",
@@ -20,9 +20,9 @@ import (
	"github.com/go-openapi/errors"
	"github.com/go-openapi/runtime/middleware"
	"github.com/go-openapi/swag"
	"github.com/minio/mcs/models"
	"github.com/minio/mcs/restapi/operations"
	"github.com/minio/mcs/restapi/operations/admin_api"
	"github.com/minio/console/models"
	"github.com/minio/console/restapi/operations"
	"github.com/minio/console/restapi/operations/admin_api"
	"github.com/minio/minio/pkg/madmin"

	"context"
@@ -31,7 +31,7 @@ import (
	"strings"
)

func registerUsersHandlers(api *operations.McsAPI) {
func registerUsersHandlers(api *operations.ConsoleAPI) {
	// List Users
	api.AdminAPIListUsersHandler = admin_api.ListUsersHandlerFunc(func(params admin_api.ListUsersParams, session *models.Principal) middleware.Responder {
		listUsersResponse, err := getListUsersResponse(session)
@@ -22,16 +22,16 @@ import (
	"path/filepath"
	"runtime"

	"github.com/minio/console/models"
	mcCmd "github.com/minio/mc/cmd"
	"github.com/minio/mc/pkg/probe"
	"github.com/minio/mcs/models"
	"github.com/minio/minio-go/v7/pkg/credentials"
	mauth "github.com/minio/minio/pkg/auth"
	iampolicy "github.com/minio/minio/pkg/iam/policy"
	"github.com/minio/minio/pkg/madmin"
)

const globalAppName = "mcs"
const globalAppName = "console"

// NewAdminClient gives a new client interface
func NewAdminClient(url, accessKey, secretKey string) (*madmin.AdminClient, *probe.Error) {
@@ -42,7 +42,7 @@ func NewAdminClient(url, accessKey, secretKey string) (*madmin.AdminClient, *pro
		AccessKey:   accessKey,
		SecretKey:   secretKey,
		AppName:     appName,
		AppVersion:  McsVersion,
		AppVersion:  ConsoleVersion,
		AppComments: []string{appName, runtime.GOOS, runtime.GOARCH},
		Insecure:    false,
	})
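Not part of this diff, but for orientation: NewAdminClient is the exported constructor the handlers use to reach the MinIO admin API, and per the signature above it returns a *probe.Error rather than a plain error. A minimal, hypothetical call site (endpoint and keys are placeholders):

	adminClient, pErr := NewAdminClient("http://localhost:9000", "minioadmin", "minioadmin")
	if pErr != nil {
		return pErr.ToGoError() // unwrap the probe.Error into a standard error
	}
	// adminClient is a *madmin.AdminClient, ready for admin API calls
	// (users, policies, service status, ...).
	_ = adminClient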
@@ -23,13 +23,13 @@ import (

	"errors"

	"github.com/minio/console/models"
	"github.com/minio/console/pkg/acl"
	"github.com/minio/console/pkg/auth"
	xjwt "github.com/minio/console/pkg/auth/jwt"
	"github.com/minio/console/pkg/auth/ldap"
	mc "github.com/minio/mc/cmd"
	"github.com/minio/mc/pkg/probe"
	"github.com/minio/mcs/models"
	"github.com/minio/mcs/pkg/acl"
	"github.com/minio/mcs/pkg/auth"
	xjwt "github.com/minio/mcs/pkg/auth/jwt"
	"github.com/minio/mcs/pkg/auth/ldap"
	"github.com/minio/minio-go/v7"
	"github.com/minio/minio-go/v7/pkg/credentials"
	"github.com/minio/minio-go/v7/pkg/notification"
@@ -93,10 +93,10 @@ func (c minioClient) getBucketPolicy(ctx context.Context, bucketName string) (st
	return c.client.GetBucketPolicy(ctx, bucketName)
}

// MCS3Client interface with all functions to be implemented
// MCClient interface with all functions to be implemented
// by mock when testing, it should include all mc/S3Client respective api calls
// that are used within this project.
type MCS3Client interface {
type MCClient interface {
	addNotificationConfig(ctx context.Context, arn string, events []string, prefix, suffix string, ignoreExisting bool) *probe.Error
	removeNotificationConfig(ctx context.Context, arn string, event string, prefix string, suffix string) *probe.Error
	watch(ctx context.Context, options mc.WatchOptions) (*mc.WatchObject, *probe.Error)
@@ -106,59 +106,59 @@ type MCS3Client interface {
//
// Define the structure of a mc S3Client and define the functions that are actually used
// from mcS3client api.
type mcS3Client struct {
type mcClient struct {
	client *mc.S3Client
}

// implements S3Client.AddNotificationConfig()
func (c mcS3Client) addNotificationConfig(ctx context.Context, arn string, events []string, prefix, suffix string, ignoreExisting bool) *probe.Error {
func (c mcClient) addNotificationConfig(ctx context.Context, arn string, events []string, prefix, suffix string, ignoreExisting bool) *probe.Error {
	return c.client.AddNotificationConfig(ctx, arn, events, prefix, suffix, ignoreExisting)
}

// implements S3Client.RemoveNotificationConfig()
func (c mcS3Client) removeNotificationConfig(ctx context.Context, arn string, event string, prefix string, suffix string) *probe.Error {
func (c mcClient) removeNotificationConfig(ctx context.Context, arn string, event string, prefix string, suffix string) *probe.Error {
	return c.client.RemoveNotificationConfig(ctx, arn, event, prefix, suffix)
}

func (c mcS3Client) watch(ctx context.Context, options mc.WatchOptions) (*mc.WatchObject, *probe.Error) {
func (c mcClient) watch(ctx context.Context, options mc.WatchOptions) (*mc.WatchObject, *probe.Error) {
	return c.client.Watch(ctx, options)
}

// MCSCredentials interface with all functions to be implemented
// by mock when testing, it should include all needed mcsCredentials.Credentials api calls
// ConsoleCredentials interface with all functions to be implemented
// by mock when testing, it should include all needed consoleCredentials.Credentials api calls
// that are used within this project.
type MCSCredentials interface {
type ConsoleCredentials interface {
	Get() (credentials.Value, error)
	Expire()
}

// Interface implementation
type mcsCredentials struct {
	mcsCredentials *credentials.Credentials
type consoleCredentials struct {
	consoleCredentials *credentials.Credentials
}

// implements *Credentials.Get()
func (c mcsCredentials) Get() (credentials.Value, error) {
	return c.mcsCredentials.Get()
func (c consoleCredentials) Get() (credentials.Value, error) {
	return c.consoleCredentials.Get()
}

// implements *Credentials.Expire()
func (c mcsCredentials) Expire() {
	c.mcsCredentials.Expire()
func (c consoleCredentials) Expire() {
	c.consoleCredentials.Expire()
}

// mcsSTSAssumeRole it's a STSAssumeRole wrapper, in general
// consoleSTSAssumeRole it's a STSAssumeRole wrapper, in general
// there's no need to use this struct anywhere else in the project, it's only required
// for passing a custom *http.Client to *credentials.STSAssumeRole
type mcsSTSAssumeRole struct {
type consoleSTSAssumeRole struct {
	stsAssumeRole *credentials.STSAssumeRole
}

func (s mcsSTSAssumeRole) Retrieve() (credentials.Value, error) {
func (s consoleSTSAssumeRole) Retrieve() (credentials.Value, error) {
	return s.stsAssumeRole.Retrieve()
}

func (s mcsSTSAssumeRole) IsExpired() bool {
func (s consoleSTSAssumeRole) IsExpired() bool {
	return s.stsAssumeRole.IsExpired()
}
@@ -168,31 +168,31 @@ var (
	MinioEndpoint = getMinIOServer()
)

func newMcsCredentials(accessKey, secretKey, location string) (*credentials.Credentials, error) {
func newConsoleCredentials(accessKey, secretKey, location string) (*credentials.Credentials, error) {
	// Future authentication methods can be added under this switch statement
	switch {
	// authentication for Operator Console
	case acl.GetOperatorMode():
		{
			creds, err := auth.GetMcsCredentialsForOperator(secretKey)
			creds, err := auth.GetConsoleCredentialsForOperator(secretKey)
			if err != nil {
				return nil, err
			}
			return creds, nil
		}
	// LDAP authentication for MCS
	// LDAP authentication for Console
	case ldap.GetLDAPEnabled():
		{
			if MinioEndpoint == "" {
				return nil, errors.New("endpoint cannot be empty for AssumeRoleSTS")
			}
			creds, err := auth.GetMcsCredentialsFromLDAP(MinioEndpoint, accessKey, secretKey)
			creds, err := auth.GetConsoleCredentialsFromLDAP(MinioEndpoint, accessKey, secretKey)
			if err != nil {
				return nil, err
			}
			return creds, nil
		}
	// default authentication for MCS is via STS (Security Token Service) against MinIO
	// default authentication for Console is via STS (Security Token Service) against MinIO
	default:
		{
			if MinioEndpoint == "" || accessKey == "" || secretKey == "" {
@@ -202,15 +202,15 @@ func newMcsCredentials(accessKey, secretKey, location string) (*credentials.Cred
				AccessKey:       accessKey,
				SecretKey:       secretKey,
				Location:        location,
				DurationSeconds: xjwt.GetMcsSTSAndJWTDurationInSeconds(),
				DurationSeconds: xjwt.GetConsoleSTSAndJWTDurationInSeconds(),
			}
			stsAssumeRole := &credentials.STSAssumeRole{
				Client:      STSClient,
				STSEndpoint: MinioEndpoint,
				Options:     opts,
			}
			mcsSTSWrapper := mcsSTSAssumeRole{stsAssumeRole: stsAssumeRole}
			return credentials.New(mcsSTSWrapper), nil
			consoleSTSWrapper := consoleSTSAssumeRole{stsAssumeRole: stsAssumeRole}
			return credentials.New(consoleSTSWrapper), nil
		}
	}
}
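For orientation only (this snippet is not in the diff): the credentials object returned by newConsoleCredentials is a lazy provider, so callers inside this package typically keep it around and call Get() when a token is actually needed; on the default path that call drives the STS AssumeRole exchange against MinioEndpoint and caches the result. A minimal sketch with placeholder keys, assuming the standard library log package is imported:

	creds, err := newConsoleCredentials("minioadmin", "minioadmin", "us-east-1")
	if err != nil {
		return err
	}
	// Retrieve a temporary credential; subsequent calls reuse the cached
	// value until it expires.
	val, err := creds.Get()
	if err != nil {
		return err
	}
	log.Println("temporary access key:", val.AccessKeyID)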
@@ -224,16 +224,16 @@ func GetClaimsFromJWT(jwt string) (*auth.DecryptedClaims, error) {
	return claims, nil
}

// getMcsCredentialsFromSession returns the *mcsCredentials.Credentials associated to the
// getConsoleCredentialsFromSession returns the *consoleCredentials.Credentials associated to the
// provided jwt, this is useful for running the Expire() or IsExpired() operations
func getMcsCredentialsFromSession(claims *models.Principal) *credentials.Credentials {
func getConsoleCredentialsFromSession(claims *models.Principal) *credentials.Credentials {
	return credentials.NewStaticV4(claims.AccessKeyID, claims.SecretAccessKey, claims.SessionToken)
}

// newMinioClient creates a new MinIO client based on the mcsCredentials extracted
// newMinioClient creates a new MinIO client based on the consoleCredentials extracted
// from the provided jwt
func newMinioClient(claims *models.Principal) (*minio.Client, error) {
	creds := getMcsCredentialsFromSession(claims)
	creds := getConsoleCredentialsFromSession(claims)
	minioClient, err := minio.New(getMinIOEndpoint(), &minio.Options{
		Creds:  creds,
		Secure: getMinIOEndpointIsSecure(),
@@ -275,11 +275,11 @@ func newS3BucketClient(claims *models.Principal, bucketName string) (*mc.S3Clien
// parameters.
func newS3Config(endpoint, accessKey, secretKey, sessionToken string, isSecure bool) *mc.Config {
	// We have a valid alias and hostConfig. We populate the
	// mcsCredentials from the match found in the config file.
	// consoleCredentials from the match found in the config file.
	s3Config := new(mc.Config)

	s3Config.AppName = "mcs"     // TODO: make this a constant
	s3Config.AppVersion = ""     // TODO: get this from constant or build
	s3Config.AppName = "console" // TODO: make this a constant
	s3Config.AppVersion = ""     // TODO: get this from constant or build
	s3Config.AppComments = []string{}
	s3Config.Debug = false
	s3Config.Insecure = isSecure
Some files were not shown because too many files have changed in this diff.