From 3c0e3a120808264b1b624d838a4c4d0f4e594159 Mon Sep 17 00:00:00 2001
From: Cesar N
Date: Mon, 7 Dec 2020 09:49:51 -0600
Subject: [PATCH] Update console to use latest operator (#476)

Main changes
Rename everything from Zone->Pool
---
 go.mod | 2 +-
 go.sum | 37 +-
 models/create_tenant_request.go | 62 +-
 models/{zone.go => pool.go} | 54 +-
 models/{zone_affinity.go => pool_affinity.go} | 174 +-
 .../{zone_resources.go => pool_resources.go} | 16 +-
 ..._seconds.go => pool_toleration_seconds.go} | 18 +-
 ...one_tolerations.go => pool_tolerations.go} | 30 +-
 ...date_request.go => pool_update_request.go} | 36 +-
 models/tenant.go | 22 +-
 models/tenant_list.go | 6 +-
 pkg/utils/parity.go | 6 +-
 portal-ui/src/common/types.ts | 12 +-
 portal-ui/src/common/utils.ts | 12 +-
 .../Console/Tenants/ListTenants/AddTenant.tsx | 14 +-
 .../Tenants/ListTenants/ListTenants.tsx | 2 +-
 ...ltiSelector.tsx => PoolsMultiSelector.tsx} | 44 +-
 .../Console/Tenants/ListTenants/types.ts | 8 +-
 .../{AddZoneModal.tsx => AddPoolModal.tsx} | 28 +-
 .../TenantDetails/ReplicationSetup.tsx | 2 +-
 .../Tenants/TenantDetails/TenantDetails.tsx | 50 +-
 restapi/admin_tenants.go | 280 +--
 restapi/admin_tenants_test.go | 94 +-
 restapi/embedded_spec.go | 1698 ++++++++---------
 ...{tenant_add_zone.go => tenant_add_pool.go} | 30 +-
 ...eters.go => tenant_add_pool_parameters.go} | 24 +-
 ...ponses.go => tenant_add_pool_responses.go} | 48 +-
 ...ilder.go => tenant_add_pool_urlbuilder.go} | 28 +-
 ...update_zones.go => tenant_update_pools.go} | 30 +-
 ...s.go => tenant_update_pools_parameters.go} | 24 +-
 ...es.go => tenant_update_pools_responses.go} | 56 +-
 ...r.go => tenant_update_pools_urlbuilder.go} | 28 +-
 restapi/operations/console_api.go | 28 +-
 restapi/server.go | 2 +-
 swagger.yml | 58 +-
 35 files changed, 1517 insertions(+), 1546 deletions(-)
 rename models/{zone.go => pool.go} (78%)
 rename models/{zone_affinity.go => pool_affinity.go} (78%)
 rename models/{zone_resources.go => pool_resources.go} (83%)
 rename models/{zone_toleration_seconds.go => pool_toleration_seconds.go} (80%)
 rename models/{zone_tolerations.go => pool_tolerations.go} (80%)
 rename models/{zone_update_request.go => pool_update_request.go} (67%)
 rename portal-ui/src/screens/Console/Tenants/ListTenants/{ZonesMultiSelector.tsx => PoolsMultiSelector.tsx} (86%)
 rename portal-ui/src/screens/Console/Tenants/TenantDetails/{AddZoneModal.tsx => AddPoolModal.tsx} (90%)
 rename restapi/operations/admin_api/{tenant_add_zone.go => tenant_add_pool.go} (68%)
 rename restapi/operations/admin_api/{tenant_add_zone_parameters.go => tenant_add_pool_parameters.go} (85%)
 rename restapi/operations/admin_api/{tenant_add_zone_responses.go => tenant_add_pool_responses.go} (57%)
 rename restapi/operations/admin_api/{tenant_add_zone_urlbuilder.go => tenant_add_pool_urlbuilder.go} (79%)
 rename restapi/operations/admin_api/{tenant_update_zones.go => tenant_update_pools.go} (64%)
 rename restapi/operations/admin_api/{tenant_update_zones_parameters.go => tenant_update_pools_parameters.go} (83%)
 rename restapi/operations/admin_api/{tenant_update_zones_responses.go => tenant_update_pools_responses.go} (56%)
 rename restapi/operations/admin_api/{tenant_update_zones_urlbuilder.go => tenant_update_pools_urlbuilder.go} (78%)

diff --git a/go.mod b/go.mod
index bef0814ca..ff9efaaed 100644
--- a/go.mod
+++ b/go.mod
@@ -19,7 +19,7 @@ require (
 	github.com/minio/mc v0.0.0-20201203214312-c691cc548890
 	github.com/minio/minio v0.0.0-20201203235615-de9b64834eda
 	github.com/minio/minio-go/v7 v7.0.6
-	github.com/minio/operator 
v0.0.0-20201022162018-527e5c32132b + github.com/minio/operator v0.0.0-20201204220226-9901d1d0766c github.com/mitchellh/go-homedir v1.1.0 github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect github.com/secure-io/sio-go v0.3.1 diff --git a/go.sum b/go.sum index c1632d7b9..60eb32522 100644 --- a/go.sum +++ b/go.sum @@ -68,7 +68,6 @@ github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/adal v0.8.3/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= -github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/adal v0.9.1 h1:xjPqigMQe2+0DAJ5A6MLUPp5D2r2Io8qHCuCMMI/yJU= github.com/Azure/go-autorest/autorest/adal v0.9.1/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= @@ -147,18 +146,14 @@ github.com/aws/aws-sdk-go v1.16.26/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpi github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.26.3/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.1/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.29.11/go.mod h1:1KvfttTE3SPKMpo8g2c6jL3ZKfXtFvKscTgahTma5Xg= github.com/aws/aws-sdk-go v1.31.6/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= -github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2 h1:M+TYzBcNIRyzPRg66ndEqUMd7oWDmhvdQmaPC6EZNwM= -github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2/go.mod h1:RDu/qcrnpEdJC/p8tx34+YBFqqX71lB7dOX9QE+ZC4M= github.com/bcicen/jstream v1.0.1 h1:BXY7Cu4rdmc0rhyTVyT3UkxAiX3bnLpKLas9btbH5ck= github.com/bcicen/jstream v1.0.1/go.mod h1:9ielPxqFry7Y4Tg3j4BfjPocfJ3TbsRtXOAYXYmRuAQ= -github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beevik/ntp v0.3.0 h1:xzVrPrE4ziasFXgBVBZJDP0Wg/KpMwk2KHJ4Ba8GrDw= github.com/beevik/ntp v0.3.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= @@ -182,7 +177,6 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cheggaaa/pb v1.0.28/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s= github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo= github.com/cheggaaa/pb v1.0.29/go.mod 
h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -214,8 +208,6 @@ github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= -github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= @@ -274,7 +266,6 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= @@ -300,7 +291,6 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= -github.com/go-ini/ini v1.57.0/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp40uXYvFoEVrNEPqRc= github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM= @@ -427,7 +417,6 @@ github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGt github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/flock v0.0.0-20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= @@ -541,7 +530,6 @@ github.com/goreleaser/nfpm 
v1.3.0/go.mod h1:w0p7Kc9TAUgWMyrub63ex3M2Mgw88M4GZXoT github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.7.5-0.20200711200521-98cb6bf42e08/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/rpc v1.2.0 h1:WvvdC2lNeT1SP32zrIce5l0ECBfbAlmrmSBsuc57wfk= @@ -672,8 +660,6 @@ github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0 github.com/klauspost/compress v1.8.2/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= -github.com/klauspost/compress v1.10.3 h1:OP96hzwJVBIHYU52pVTI6CczrxPvrGfgqF9N5eTO0Q8= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.11.3 h1:dB4Bn0tN3wdCzQxnS8r06kV74qN/TAfaIS0bVE8h3jc= github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= @@ -683,8 +669,6 @@ github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgo github.com/klauspost/cpuid v1.2.4/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.3.1 h1:5JNjFYYQrZeKRJ0734q51WCEEn2huer72Dc7K+R/b6s= github.com/klauspost/cpuid v1.3.1/go.mod h1:bYW4mA6ZgKPob1/Dlai2LviZJO7KGI3uoWLd42rAQw4= -github.com/klauspost/pgzip v1.2.1 h1:oIPZROsWuPHpOdMVWLuJZXwgjhrW8r1yEX8UqMyeNHM= -github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE= github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/readahead v1.3.1 h1:QqXNYvm+VvqYcbrRT4LojUciM0XrznFRIDrbHiJtu/0= @@ -714,7 +698,6 @@ github.com/logrusorgru/aurora v0.0.0-20181002194514-a7b3b318ed4e/go.mod h1:7rIyQ github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.0.0-20180730094502-03f2033d19d5/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -769,24 +752,20 @@ github.com/minio/mc v0.0.0-20201203214312-c691cc548890 h1:TmD1/XEYPsmbaMeeTnYlAk github.com/minio/mc v0.0.0-20201203214312-c691cc548890/go.mod h1:hqA6hYDW4hL04+bsin4nQnlNQgxcyN1AnWg3HhXD8Zc= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod 
h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio v0.0.0-20200723003940-b9be841fd222/go.mod h1:Eu2KC2p+vW03rnYY/6R/D+QduPB7/j4kBaVA/EDLjWM= github.com/minio/minio v0.0.0-20201203193518-80d31113e5be/go.mod h1:+wH6R6AjgNDUvMBb24p/e7zn8VU+ChUAXy4uhopGxQA= +github.com/minio/minio v0.0.0-20201203193910-919441d9c4d2/go.mod h1:+wH6R6AjgNDUvMBb24p/e7zn8VU+ChUAXy4uhopGxQA= github.com/minio/minio v0.0.0-20201203235615-de9b64834eda h1:ZE7O7pA+8zTJZVOuZrIweiG6238h1/XSZ4A5LiW2e/8= github.com/minio/minio v0.0.0-20201203235615-de9b64834eda/go.mod h1:+wH6R6AjgNDUvMBb24p/e7zn8VU+ChUAXy4uhopGxQA= -github.com/minio/minio-go/v7 v7.0.1/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= -github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= github.com/minio/minio-go/v7 v7.0.6 h1:9czXaG0LEZ9s74smSqy0rm034MxngQoP6HTTuSc5GEs= github.com/minio/minio-go/v7 v7.0.6/go.mod h1:HcIuq+11d/3MfavIPZiswSzfQ1VJ2Lwxp/XLtW46IWQ= -github.com/minio/operator v0.0.0-20201022162018-527e5c32132b h1:ggfD6V3nodTzhHJHCYIT1F897gscrz+hHsan0a2Wtqw= -github.com/minio/operator v0.0.0-20201022162018-527e5c32132b/go.mod h1:At+++4/6W5BEXK11tN5DKIvkJKhYBZybbb5zmxb0LQI= +github.com/minio/operator v0.0.0-20201204220226-9901d1d0766c h1:2QpnenH2gieq5yVh6sZYylXKCoBgwKkxcgqkLr/fq9M= +github.com/minio/operator v0.0.0-20201204220226-9901d1d0766c/go.mod h1:Xnb44PIBZF/JCN4uXEEzf9vFwhnB9zXsQgVKU7GThiM= github.com/minio/selfupdate v0.3.1 h1:BWEFSNnrZVMUWXbXIgLDNDjbejkmpAmZvy/nCz1HlEs= github.com/minio/selfupdate v0.3.1/go.mod h1:b8ThJzzH7u2MkF6PcIra7KaXO9Khf6alWPvMSyTDCFM= github.com/minio/sha256-simd v0.1.1 h1:5QHSlgo3nt5yKOJrC7W8w7X+NFl8cMPZm96iu8kKUJU= github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM= -github.com/minio/simdjson-go v0.1.5-0.20200303142138-b17fe061ea37/go.mod h1:oKURrZZEBtqObgJrSjN1Ln2n9MJj2icuBTkeJzZnvSI= github.com/minio/simdjson-go v0.1.5 h1:6T5mHh7r3kUvgwhmFWQAjoPV5Yt5oD/VPjAI9ViH1kM= github.com/minio/simdjson-go v0.1.5/go.mod h1:oKURrZZEBtqObgJrSjN1Ln2n9MJj2icuBTkeJzZnvSI= -github.com/minio/sio v0.2.0/go.mod h1:nKM5GIWSrqbOZp0uhyj6M1iA0X6xQzSGtYSaTKSCut0= github.com/minio/sio v0.2.1 h1:NjzKiIMSMcHediVQR0AFVx2tp7Wxh9tKPfDI3kH7aHQ= github.com/minio/sio v0.2.1/go.mod h1:8b0yPp2avGThviy/+OCJBI6OMpvxoUuiLvE6F1lebhw= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= @@ -806,7 +785,6 @@ github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.3.2 h1:mRS76wmkOn3KkKAyXDu42V+6ebnXWIztFSYGN7GeoRg= github.com/mitchellh/mapstructure v1.3.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mmcloughlin/avo v0.0.0-20200523190732-4439b6b2c061/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls= github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104 h1:ULR/QWMgcgRiZLUjSSJMU+fW+RDMstRdmnDWj9Q+AsA= github.com/mmcloughlin/avo v0.0.0-20200803215136-443f81d77104/go.mod h1:wqKykBG2QzQDJEzvRkcS8x6MiSJkF52hXZsXcjaB3ls= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -881,7 +859,6 @@ github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUr github.com/pelletier/go-toml v1.8.0/go.mod h1:D6yutnOGMveHEPV7VQOuvI/gXY61bv+9bAOTRnLElKs= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= 
github.com/phayes/checkstyle v0.0.0-20170904204023-bfd46e6a821d/go.mod h1:3OzsM7FXDQlpCiw2j81fOmAwQLnZnLGXVKUzeKQXIAw= -github.com/philhofer/fwd v1.0.0/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -964,8 +941,6 @@ github.com/securego/gosec v0.0.0-20200401082031-e946c8c39989/go.mod h1:i9l/TNj+y github.com/securego/gosec/v2 v2.3.0/go.mod h1:UzeVyUXbxukhLeHKV3VVqo7HdoQR9MrRfFmZYotn8ME= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/shirou/gopsutil v0.0.0-20190901111213-e4ec7b275ada/go.mod h1:WWnYX4lzhCH5h/3YBfyVA3VbLYjlMZZAQcW9ojMexNc= -github.com/shirou/gopsutil v2.20.3-0.20200314133625-53cec6b37e6a+incompatible h1:YiKUe2ZOmfpDBH4OSyxwkx/mjNqHHnNhOtZ2mPyRme8= -github.com/shirou/gopsutil v2.20.3-0.20200314133625-53cec6b37e6a+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.20.9+incompatible h1:msXs2frUV+O/JLva9EDLpuJ84PrFsdCTCQex8PUdtkQ= github.com/shirou/gopsutil v2.20.9+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= github.com/shirou/gopsutil v2.20.10-0.20201015215925-32d4603d01b7+incompatible h1:4Nw/OaZkO6z7Tj2V48FQkGcVESBvJmKvj+SLKugM6x8= @@ -1044,7 +1019,6 @@ github.com/tidwall/sjson v1.0.4 h1:UcdIRXff12Lpnu3OLtZvnc03g4vH2suXDXhBwBqmzYg= github.com/tidwall/sjson v1.0.4/go.mod h1:bURseu1nuBkFpIES5cz6zBtjmYeOQmEESshn7VpF15Y= github.com/timakin/bodyclose v0.0.0-20190930140734-f7f2e9bca95e/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= github.com/timakin/bodyclose v0.0.0-20200424151742-cb6215831a94/go.mod h1:Qimiffbc6q9tBWlVV6x0P9sat/ao1xEkREYPPj9hphk= -github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.3 h1:3giwAkmtaEDLSV0MdO1lDLuPgklgPzmk8H9+So2BVfA= github.com/tinylib/msgp v1.1.3/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= @@ -1074,7 +1048,6 @@ github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a h1:0R4NLDRDZX6Jc github.com/valyala/tcplisten v0.0.0-20161114210144-ceec8f93295a/go.mod h1:v3UYOV9WzVtRmSR+PDvWpU/qWl4Wa5LApYYX4ZtKbio= github.com/vdemeester/k8s-pkg-credentialprovider v1.17.4/go.mod h1:inCTmtUdr5KJbreVojo06krnTgaeAz/Z7lynpPk/Q2c= github.com/vmware/govmomi v0.20.3/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= -github.com/willf/bitset v1.1.10/go.mod h1:RjeCKbqT1RxIR/KWY6phxZiaY1IyutSBfGjNPySAYV4= github.com/willf/bitset v1.1.11 h1:N7Z7E9UvjW+sGsEl7k/SJrvY2reP1A07MrGuCjIOjRE= github.com/willf/bitset v1.1.11/go.mod h1:83CECat5yLh5zVOf4P1ErAgKA5UDvKtgyUABdr3+MjI= github.com/willf/bloom v2.0.3+incompatible h1:QDacWdqcAUI1MPOwIQZRy9kOR7yxfyEmxX8Wdm2/JPA= @@ -1102,8 +1075,6 @@ go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738 h1:VcrIfasaLFkyjk6KNlXQSzO+B0 go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= go.etcd.io/etcd v0.0.0-20201125193152-8a03d2e9614b h1:5makfKENOTVu2bNoHzSqwwz+g70ivWLSnExzd33/2bI= go.etcd.io/etcd v0.0.0-20201125193152-8a03d2e9614b/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg= -go.etcd.io/etcd/v3 v3.3.0-rc.0.0.20200707003333-58bb8ae09f8e h1:HZQLoe71Q24wVyDrGBRcVuogx32U+cPlcm/WoSLUI6c= 
-go.etcd.io/etcd/v3 v3.3.0-rc.0.0.20200707003333-58bb8ae09f8e/go.mod h1:UENlOa05tkNvLx9VnNziSerG4Ro74upGK6Apd4v6M/Y= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= @@ -1132,7 +1103,6 @@ golang.org/x/arch v0.0.0-20190909030613-46d78d1859ac/go.mod h1:flIaEI6LNU6xOCD5P golang.org/x/crypto v0.0.0-20180723164146-c126467f60eb/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20181106171534-e4dc69e5b2fd/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1529,7 +1499,6 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv gopkg.in/ldap.v3 v3.0.3 h1:YKRHW/2sIl05JsCtx/5ZuUueFuJyoj/6+DGXe3wp6ro= gopkg.in/ldap.v3 v3.0.3/go.mod h1:oxD7NyBuxchC+SgJDE1Q5Od05eGt29SDQVBmV+HYbzw= gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= -gopkg.in/olivere/elastic.v5 v5.0.80/go.mod h1:uhHoB4o3bvX5sorxBU29rPcmBQdV2Qfg0FBrx5D6pV0= gopkg.in/olivere/elastic.v5 v5.0.86 h1:xFy6qRCGAmo5Wjx96srho9BitLhZl2fcnpuidPwduXM= gopkg.in/olivere/elastic.v5 v5.0.86/go.mod h1:M3WNlsF+WhYn7api4D87NIflwTV/c0iVs8cqfWhK+68= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= diff --git a/models/create_tenant_request.go b/models/create_tenant_request.go index 5fd5c013c..2b4a8f40e 100644 --- a/models/create_tenant_request.go +++ b/models/create_tenant_request.go @@ -90,15 +90,15 @@ type CreateTenantRequest struct { // Required: true Namespace *string `json:"namespace"` + // pools + // Required: true + Pools []*Pool `json:"pools"` + // secret key SecretKey string `json:"secret_key,omitempty"` // tls TLS *TLSConfiguration `json:"tls,omitempty"` - - // zones - // Required: true - Zones []*Zone `json:"zones"` } // Validate validates this create tenant request @@ -129,11 +129,11 @@ func (m *CreateTenantRequest) Validate(formats strfmt.Registry) error { res = append(res, err) } - if err := m.validateTLS(formats); err != nil { + if err := m.validatePools(formats); err != nil { res = append(res, err) } - if err := m.validateZones(formats); err != nil { + if err := m.validateTLS(formats); err != nil { res = append(res, err) } @@ -237,6 +237,31 @@ func (m *CreateTenantRequest) validateNamespace(formats strfmt.Registry) error { return nil } +func (m *CreateTenantRequest) validatePools(formats strfmt.Registry) error { + + if err := validate.Required("pools", "body", m.Pools); err != nil { + return err + } + + for i := 0; i < len(m.Pools); i++ { + if swag.IsZero(m.Pools[i]) { // not required + continue + } + + if m.Pools[i] != nil { + if err := m.Pools[i].Validate(formats); err != nil { + if ve, ok := err.(*errors.Validation); ok { + return ve.ValidateName("pools" + "." 
+ strconv.Itoa(i)) + } + return err + } + } + + } + + return nil +} + func (m *CreateTenantRequest) validateTLS(formats strfmt.Registry) error { if swag.IsZero(m.TLS) { // not required @@ -255,31 +280,6 @@ func (m *CreateTenantRequest) validateTLS(formats strfmt.Registry) error { return nil } -func (m *CreateTenantRequest) validateZones(formats strfmt.Registry) error { - - if err := validate.Required("zones", "body", m.Zones); err != nil { - return err - } - - for i := 0; i < len(m.Zones); i++ { - if swag.IsZero(m.Zones[i]) { // not required - continue - } - - if m.Zones[i] != nil { - if err := m.Zones[i].Validate(formats); err != nil { - if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("zones" + "." + strconv.Itoa(i)) - } - return err - } - } - - } - - return nil -} - // MarshalBinary interface implementation func (m *CreateTenantRequest) MarshalBinary() ([]byte, error) { if m == nil { diff --git a/models/zone.go b/models/pool.go similarity index 78% rename from models/zone.go rename to models/pool.go index bb4642944..402f892cd 100644 --- a/models/zone.go +++ b/models/pool.go @@ -29,13 +29,13 @@ import ( "github.com/go-openapi/validate" ) -// Zone zone +// Pool pool // -// swagger:model zone -type Zone struct { +// swagger:model pool +type Pool struct { // affinity - Affinity *ZoneAffinity `json:"affinity,omitempty"` + Affinity *PoolAffinity `json:"affinity,omitempty"` // name Name string `json:"name,omitempty"` @@ -44,26 +44,26 @@ type Zone struct { NodeSelector map[string]string `json:"node_selector,omitempty"` // resources - Resources *ZoneResources `json:"resources,omitempty"` + Resources *PoolResources `json:"resources,omitempty"` // servers // Required: true Servers *int64 `json:"servers"` // tolerations - Tolerations ZoneTolerations `json:"tolerations,omitempty"` + Tolerations PoolTolerations `json:"tolerations,omitempty"` // volume configuration // Required: true - VolumeConfiguration *ZoneVolumeConfiguration `json:"volume_configuration"` + VolumeConfiguration *PoolVolumeConfiguration `json:"volume_configuration"` // volumes per server // Required: true VolumesPerServer *int32 `json:"volumes_per_server"` } -// Validate validates this zone -func (m *Zone) Validate(formats strfmt.Registry) error { +// Validate validates this pool +func (m *Pool) Validate(formats strfmt.Registry) error { var res []error if err := m.validateAffinity(formats); err != nil { @@ -96,7 +96,7 @@ func (m *Zone) Validate(formats strfmt.Registry) error { return nil } -func (m *Zone) validateAffinity(formats strfmt.Registry) error { +func (m *Pool) validateAffinity(formats strfmt.Registry) error { if swag.IsZero(m.Affinity) { // not required return nil @@ -114,7 +114,7 @@ func (m *Zone) validateAffinity(formats strfmt.Registry) error { return nil } -func (m *Zone) validateResources(formats strfmt.Registry) error { +func (m *Pool) validateResources(formats strfmt.Registry) error { if swag.IsZero(m.Resources) { // not required return nil @@ -132,7 +132,7 @@ func (m *Zone) validateResources(formats strfmt.Registry) error { return nil } -func (m *Zone) validateServers(formats strfmt.Registry) error { +func (m *Pool) validateServers(formats strfmt.Registry) error { if err := validate.Required("servers", "body", m.Servers); err != nil { return err @@ -141,7 +141,7 @@ func (m *Zone) validateServers(formats strfmt.Registry) error { return nil } -func (m *Zone) validateTolerations(formats strfmt.Registry) error { +func (m *Pool) validateTolerations(formats strfmt.Registry) error { if 
swag.IsZero(m.Tolerations) { // not required return nil @@ -157,7 +157,7 @@ func (m *Zone) validateTolerations(formats strfmt.Registry) error { return nil } -func (m *Zone) validateVolumeConfiguration(formats strfmt.Registry) error { +func (m *Pool) validateVolumeConfiguration(formats strfmt.Registry) error { if err := validate.Required("volume_configuration", "body", m.VolumeConfiguration); err != nil { return err @@ -175,7 +175,7 @@ func (m *Zone) validateVolumeConfiguration(formats strfmt.Registry) error { return nil } -func (m *Zone) validateVolumesPerServer(formats strfmt.Registry) error { +func (m *Pool) validateVolumesPerServer(formats strfmt.Registry) error { if err := validate.Required("volumes_per_server", "body", m.VolumesPerServer); err != nil { return err @@ -185,7 +185,7 @@ func (m *Zone) validateVolumesPerServer(formats strfmt.Registry) error { } // MarshalBinary interface implementation -func (m *Zone) MarshalBinary() ([]byte, error) { +func (m *Pool) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -193,8 +193,8 @@ func (m *Zone) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *Zone) UnmarshalBinary(b []byte) error { - var res Zone +func (m *Pool) UnmarshalBinary(b []byte) error { + var res Pool if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -202,10 +202,10 @@ func (m *Zone) UnmarshalBinary(b []byte) error { return nil } -// ZoneVolumeConfiguration zone volume configuration +// PoolVolumeConfiguration pool volume configuration // -// swagger:model ZoneVolumeConfiguration -type ZoneVolumeConfiguration struct { +// swagger:model PoolVolumeConfiguration +type PoolVolumeConfiguration struct { // annotations Annotations map[string]string `json:"annotations,omitempty"` @@ -221,8 +221,8 @@ type ZoneVolumeConfiguration struct { StorageClassName string `json:"storage_class_name,omitempty"` } -// Validate validates this zone volume configuration -func (m *ZoneVolumeConfiguration) Validate(formats strfmt.Registry) error { +// Validate validates this pool volume configuration +func (m *PoolVolumeConfiguration) Validate(formats strfmt.Registry) error { var res []error if err := m.validateSize(formats); err != nil { @@ -235,7 +235,7 @@ func (m *ZoneVolumeConfiguration) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneVolumeConfiguration) validateSize(formats strfmt.Registry) error { +func (m *PoolVolumeConfiguration) validateSize(formats strfmt.Registry) error { if err := validate.Required("volume_configuration"+"."+"size", "body", m.Size); err != nil { return err @@ -245,7 +245,7 @@ func (m *ZoneVolumeConfiguration) validateSize(formats strfmt.Registry) error { } // MarshalBinary interface implementation -func (m *ZoneVolumeConfiguration) MarshalBinary() ([]byte, error) { +func (m *PoolVolumeConfiguration) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -253,8 +253,8 @@ func (m *ZoneVolumeConfiguration) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneVolumeConfiguration) UnmarshalBinary(b []byte) error { - var res ZoneVolumeConfiguration +func (m *PoolVolumeConfiguration) UnmarshalBinary(b []byte) error { + var res PoolVolumeConfiguration if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/zone_affinity.go b/models/pool_affinity.go similarity index 78% rename from models/zone_affinity.go rename to models/pool_affinity.go index 70449c16d..b4319fd52 100644 --- a/models/zone_affinity.go +++ 
b/models/pool_affinity.go @@ -31,23 +31,23 @@ import ( "github.com/go-openapi/validate" ) -// ZoneAffinity If specified, affinity will define the pod's scheduling constraints +// PoolAffinity If specified, affinity will define the pod's scheduling constraints // -// swagger:model zoneAffinity -type ZoneAffinity struct { +// swagger:model poolAffinity +type PoolAffinity struct { // node affinity - NodeAffinity *ZoneAffinityNodeAffinity `json:"nodeAffinity,omitempty"` + NodeAffinity *PoolAffinityNodeAffinity `json:"nodeAffinity,omitempty"` // pod affinity - PodAffinity *ZoneAffinityPodAffinity `json:"podAffinity,omitempty"` + PodAffinity *PoolAffinityPodAffinity `json:"podAffinity,omitempty"` // pod anti affinity - PodAntiAffinity *ZoneAffinityPodAntiAffinity `json:"podAntiAffinity,omitempty"` + PodAntiAffinity *PoolAffinityPodAntiAffinity `json:"podAntiAffinity,omitempty"` } -// Validate validates this zone affinity -func (m *ZoneAffinity) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity +func (m *PoolAffinity) Validate(formats strfmt.Registry) error { var res []error if err := m.validateNodeAffinity(formats); err != nil { @@ -68,7 +68,7 @@ func (m *ZoneAffinity) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinity) validateNodeAffinity(formats strfmt.Registry) error { +func (m *PoolAffinity) validateNodeAffinity(formats strfmt.Registry) error { if swag.IsZero(m.NodeAffinity) { // not required return nil @@ -86,7 +86,7 @@ func (m *ZoneAffinity) validateNodeAffinity(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinity) validatePodAffinity(formats strfmt.Registry) error { +func (m *PoolAffinity) validatePodAffinity(formats strfmt.Registry) error { if swag.IsZero(m.PodAffinity) { // not required return nil @@ -104,7 +104,7 @@ func (m *ZoneAffinity) validatePodAffinity(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinity) validatePodAntiAffinity(formats strfmt.Registry) error { +func (m *PoolAffinity) validatePodAntiAffinity(formats strfmt.Registry) error { if swag.IsZero(m.PodAntiAffinity) { // not required return nil @@ -123,7 +123,7 @@ func (m *ZoneAffinity) validatePodAntiAffinity(formats strfmt.Registry) error { } // MarshalBinary interface implementation -func (m *ZoneAffinity) MarshalBinary() ([]byte, error) { +func (m *PoolAffinity) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -131,8 +131,8 @@ func (m *ZoneAffinity) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneAffinity) UnmarshalBinary(b []byte) error { - var res ZoneAffinity +func (m *PoolAffinity) UnmarshalBinary(b []byte) error { + var res PoolAffinity if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -140,20 +140,20 @@ func (m *ZoneAffinity) UnmarshalBinary(b []byte) error { return nil } -// ZoneAffinityNodeAffinity Describes node affinity scheduling rules for the pod. +// PoolAffinityNodeAffinity Describes node affinity scheduling rules for the pod. // -// swagger:model ZoneAffinityNodeAffinity -type ZoneAffinityNodeAffinity struct { +// swagger:model PoolAffinityNodeAffinity +type PoolAffinityNodeAffinity struct { // The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` + PreferredDuringSchedulingIgnoredDuringExecution []*PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` // required during scheduling ignored during execution - RequiredDuringSchedulingIgnoredDuringExecution *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` + RequiredDuringSchedulingIgnoredDuringExecution *PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution `json:"requiredDuringSchedulingIgnoredDuringExecution,omitempty"` } -// Validate validates this zone affinity node affinity -func (m *ZoneAffinityNodeAffinity) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity node affinity +func (m *PoolAffinityNodeAffinity) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil { @@ -170,7 +170,7 @@ func (m *ZoneAffinityNodeAffinity) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinityNodeAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityNodeAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -195,7 +195,7 @@ func (m *ZoneAffinityNodeAffinity) validatePreferredDuringSchedulingIgnoredDurin return nil } -func (m *ZoneAffinityNodeAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityNodeAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -214,7 +214,7 @@ func (m *ZoneAffinityNodeAffinity) validateRequiredDuringSchedulingIgnoredDuring } // MarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinity) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityNodeAffinity) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -222,8 +222,8 @@ func (m *ZoneAffinityNodeAffinity) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinity) UnmarshalBinary(b []byte) error { - var res ZoneAffinityNodeAffinity +func (m *PoolAffinityNodeAffinity) UnmarshalBinary(b []byte) error { + var res PoolAffinityNodeAffinity if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -231,10 +231,10 @@ func (m *ZoneAffinityNodeAffinity) UnmarshalBinary(b []byte) error { return nil } -// ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). 
+// PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op). // -// swagger:model ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 -type ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { +// swagger:model PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +type PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { // A node selector term, associated with the corresponding weight. // Required: true @@ -245,8 +245,8 @@ type ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItem Weight *int32 `json:"weight"` } -// Validate validates this zone affinity node affinity preferred during scheduling ignored during execution items0 -func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity node affinity preferred during scheduling ignored during execution items0 +func (m *PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePreference(formats); err != nil { @@ -263,7 +263,7 @@ func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution return nil } -func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePreference(formats strfmt.Registry) error { +func (m *PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePreference(formats strfmt.Registry) error { if err := validate.Required("preference", "body", m.Preference); err != nil { return err @@ -281,7 +281,7 @@ func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution return nil } -func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { +func (m *PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { if err := validate.Required("weight", "body", m.Weight); err != nil { return err @@ -291,7 +291,7 @@ func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution } // MarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -299,8 +299,8 @@ func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution } // UnmarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { - var res ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +func (m *PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { + var res PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -308,18 +308,18 @@ func (m *ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecution return nil } -// 
ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. +// PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node. // -// swagger:model ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution -type ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution struct { +// swagger:model PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution +type PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution struct { // Required. A list of node selector terms. The terms are ORed. // Required: true NodeSelectorTerms []*NodeSelectorTerm `json:"nodeSelectorTerms"` } -// Validate validates this zone affinity node affinity required during scheduling ignored during execution -func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity node affinity required during scheduling ignored during execution +func (m *PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) Validate(formats strfmt.Registry) error { var res []error if err := m.validateNodeSelectorTerms(formats); err != nil { @@ -332,7 +332,7 @@ func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) return nil } -func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) validateNodeSelectorTerms(formats strfmt.Registry) error { +func (m *PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) validateNodeSelectorTerms(formats strfmt.Registry) error { if err := validate.Required("nodeAffinity"+"."+"requiredDuringSchedulingIgnoredDuringExecution"+"."+"nodeSelectorTerms", "body", m.NodeSelectorTerms); err != nil { return err @@ -358,7 +358,7 @@ func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) } // MarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -366,8 +366,8 @@ func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) } // UnmarshalBinary interface implementation -func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) UnmarshalBinary(b []byte) error { - var res ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution +func (m *PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) UnmarshalBinary(b []byte) error { + var res PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -375,20 +375,20 @@ func (m *ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution) return nil } -// 
ZoneAffinityPodAffinity Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)). +// PoolAffinityPodAffinity Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, pool, etc. as some other pod(s)). // -// swagger:model ZoneAffinityPodAffinity -type ZoneAffinityPodAffinity struct { +// swagger:model PoolAffinityPodAffinity +type PoolAffinityPodAffinity struct { // The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. - PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` + PreferredDuringSchedulingIgnoredDuringExecution []*PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` // If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. 
RequiredDuringSchedulingIgnoredDuringExecution []*PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution"` } -// Validate validates this zone affinity pod affinity -func (m *ZoneAffinityPodAffinity) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity pod affinity +func (m *PoolAffinityPodAffinity) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil { @@ -405,7 +405,7 @@ func (m *ZoneAffinityPodAffinity) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinityPodAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityPodAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -430,7 +430,7 @@ func (m *ZoneAffinityPodAffinity) validatePreferredDuringSchedulingIgnoredDuring return nil } -func (m *ZoneAffinityPodAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityPodAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -456,7 +456,7 @@ func (m *ZoneAffinityPodAffinity) validateRequiredDuringSchedulingIgnoredDuringE } // MarshalBinary interface implementation -func (m *ZoneAffinityPodAffinity) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityPodAffinity) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -464,8 +464,8 @@ func (m *ZoneAffinityPodAffinity) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneAffinityPodAffinity) UnmarshalBinary(b []byte) error { - var res ZoneAffinityPodAffinity +func (m *PoolAffinityPodAffinity) UnmarshalBinary(b []byte) error { + var res PoolAffinityPodAffinity if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -473,10 +473,10 @@ func (m *ZoneAffinityPodAffinity) UnmarshalBinary(b []byte) error { return nil } -// ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +// PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) // -// swagger:model ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 -type ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { +// swagger:model PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +type PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { // pod affinity term // Required: true @@ -487,8 +487,8 @@ type ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems Weight *int32 `json:"weight"` } -// Validate validates this zone affinity pod affinity preferred during scheduling ignored during execution items0 -func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity pod affinity preferred during scheduling ignored during execution items0 +func (m 
*PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePodAffinityTerm(formats); err != nil { @@ -505,7 +505,7 @@ func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionI return nil } -func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error { +func (m *PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error { if err := validate.Required("podAffinityTerm", "body", m.PodAffinityTerm); err != nil { return err @@ -523,7 +523,7 @@ func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionI return nil } -func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { +func (m *PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { if err := validate.Required("weight", "body", m.Weight); err != nil { return err @@ -533,7 +533,7 @@ func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionI } // MarshalBinary interface implementation -func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -541,8 +541,8 @@ func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionI } // UnmarshalBinary interface implementation -func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { - var res ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +func (m *PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { + var res PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -550,20 +550,20 @@ func (m *ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionI return nil } -// ZoneAffinityPodAntiAffinity Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)). +// PoolAffinityPodAntiAffinity Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, pool, etc. as some other pod(s)). // -// swagger:model ZoneAffinityPodAntiAffinity -type ZoneAffinityPodAntiAffinity struct { +// swagger:model PoolAffinityPodAntiAffinity +type PoolAffinityPodAntiAffinity struct { // The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding "weight" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred. 
- PreferredDuringSchedulingIgnoredDuringExecution []*ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` + PreferredDuringSchedulingIgnoredDuringExecution []*PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 `json:"preferredDuringSchedulingIgnoredDuringExecution"` // If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied. RequiredDuringSchedulingIgnoredDuringExecution []*PodAffinityTerm `json:"requiredDuringSchedulingIgnoredDuringExecution"` } -// Validate validates this zone affinity pod anti affinity -func (m *ZoneAffinityPodAntiAffinity) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity pod anti affinity +func (m *PoolAffinityPodAntiAffinity) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePreferredDuringSchedulingIgnoredDuringExecution(formats); err != nil { @@ -580,7 +580,7 @@ func (m *ZoneAffinityPodAntiAffinity) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneAffinityPodAntiAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityPodAntiAffinity) validatePreferredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.PreferredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -605,7 +605,7 @@ func (m *ZoneAffinityPodAntiAffinity) validatePreferredDuringSchedulingIgnoredDu return nil } -func (m *ZoneAffinityPodAntiAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { +func (m *PoolAffinityPodAntiAffinity) validateRequiredDuringSchedulingIgnoredDuringExecution(formats strfmt.Registry) error { if swag.IsZero(m.RequiredDuringSchedulingIgnoredDuringExecution) { // not required return nil @@ -631,7 +631,7 @@ func (m *ZoneAffinityPodAntiAffinity) validateRequiredDuringSchedulingIgnoredDur } // MarshalBinary interface implementation -func (m *ZoneAffinityPodAntiAffinity) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityPodAntiAffinity) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -639,8 +639,8 @@ func (m *ZoneAffinityPodAntiAffinity) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneAffinityPodAntiAffinity) UnmarshalBinary(b []byte) error { - var res ZoneAffinityPodAntiAffinity +func (m *PoolAffinityPodAntiAffinity) UnmarshalBinary(b []byte) error { + var res PoolAffinityPodAntiAffinity if err := swag.ReadJSON(b, &res); err != nil { return err } @@ -648,10 +648,10 @@ func (m *ZoneAffinityPodAntiAffinity) UnmarshalBinary(b []byte) error { return nil } -// ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s) +// PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the 
most preferred node(s) // -// swagger:model ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 -type ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { +// swagger:model PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +type PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 struct { // pod affinity term // Required: true @@ -662,8 +662,8 @@ type ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionI Weight *int32 `json:"weight"` } -// Validate validates this zone affinity pod anti affinity preferred during scheduling ignored during execution items0 -func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { +// Validate validates this pool affinity pod anti affinity preferred during scheduling ignored during execution items0 +func (m *PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) Validate(formats strfmt.Registry) error { var res []error if err := m.validatePodAffinityTerm(formats); err != nil { @@ -680,7 +680,7 @@ func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecut return nil } -func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error { +func (m *PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validatePodAffinityTerm(formats strfmt.Registry) error { if err := validate.Required("podAffinityTerm", "body", m.PodAffinityTerm); err != nil { return err @@ -698,7 +698,7 @@ func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecut return nil } -func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { +func (m *PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) validateWeight(formats strfmt.Registry) error { if err := validate.Required("weight", "body", m.Weight); err != nil { return err @@ -708,7 +708,7 @@ func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecut } // MarshalBinary interface implementation -func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { +func (m *PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -716,8 +716,8 @@ func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecut } // UnmarshalBinary interface implementation -func (m *ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { - var res ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 +func (m *PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0) UnmarshalBinary(b []byte) error { + var res PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0 if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/zone_resources.go b/models/pool_resources.go similarity index 83% rename from models/zone_resources.go rename to models/pool_resources.go index 83813efbb..fd4e1924f 100644 --- a/models/zone_resources.go +++ b/models/pool_resources.go @@ -27,10 +27,10 @@ import ( "github.com/go-openapi/swag" ) -// 
ZoneResources If provided, use these requests and limit for cpu/memory resource allocation +// PoolResources If provided, use these requests and limit for cpu/memory resource allocation // -// swagger:model zoneResources -type ZoneResources struct { +// swagger:model poolResources +type PoolResources struct { // Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ Limits map[string]int64 `json:"limits,omitempty"` @@ -39,13 +39,13 @@ type ZoneResources struct { Requests map[string]int64 `json:"requests,omitempty"` } -// Validate validates this zone resources -func (m *ZoneResources) Validate(formats strfmt.Registry) error { +// Validate validates this pool resources +func (m *PoolResources) Validate(formats strfmt.Registry) error { return nil } // MarshalBinary interface implementation -func (m *ZoneResources) MarshalBinary() ([]byte, error) { +func (m *PoolResources) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -53,8 +53,8 @@ func (m *ZoneResources) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneResources) UnmarshalBinary(b []byte) error { - var res ZoneResources +func (m *PoolResources) UnmarshalBinary(b []byte) error { + var res PoolResources if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/zone_toleration_seconds.go b/models/pool_toleration_seconds.go similarity index 80% rename from models/zone_toleration_seconds.go rename to models/pool_toleration_seconds.go index 9c89b194e..ba40a35bd 100644 --- a/models/zone_toleration_seconds.go +++ b/models/pool_toleration_seconds.go @@ -29,18 +29,18 @@ import ( "github.com/go-openapi/validate" ) -// ZoneTolerationSeconds TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. +// PoolTolerationSeconds TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system. 
// -// swagger:model zoneTolerationSeconds -type ZoneTolerationSeconds struct { +// swagger:model poolTolerationSeconds +type PoolTolerationSeconds struct { // seconds // Required: true Seconds *int64 `json:"seconds"` } -// Validate validates this zone toleration seconds -func (m *ZoneTolerationSeconds) Validate(formats strfmt.Registry) error { +// Validate validates this pool toleration seconds +func (m *PoolTolerationSeconds) Validate(formats strfmt.Registry) error { var res []error if err := m.validateSeconds(formats); err != nil { @@ -53,7 +53,7 @@ func (m *ZoneTolerationSeconds) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneTolerationSeconds) validateSeconds(formats strfmt.Registry) error { +func (m *PoolTolerationSeconds) validateSeconds(formats strfmt.Registry) error { if err := validate.Required("seconds", "body", m.Seconds); err != nil { return err @@ -63,7 +63,7 @@ func (m *ZoneTolerationSeconds) validateSeconds(formats strfmt.Registry) error { } // MarshalBinary interface implementation -func (m *ZoneTolerationSeconds) MarshalBinary() ([]byte, error) { +func (m *PoolTolerationSeconds) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -71,8 +71,8 @@ func (m *ZoneTolerationSeconds) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneTolerationSeconds) UnmarshalBinary(b []byte) error { - var res ZoneTolerationSeconds +func (m *PoolTolerationSeconds) UnmarshalBinary(b []byte) error { + var res PoolTolerationSeconds if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/zone_tolerations.go b/models/pool_tolerations.go similarity index 80% rename from models/zone_tolerations.go rename to models/pool_tolerations.go index f893ee22f..12b42ed81 100644 --- a/models/zone_tolerations.go +++ b/models/pool_tolerations.go @@ -30,13 +30,13 @@ import ( "github.com/go-openapi/swag" ) -// ZoneTolerations Tolerations allows users to set entries like effect, key, operator, value. +// PoolTolerations Tolerations allows users to set entries like effect, key, operator, value. // -// swagger:model zoneTolerations -type ZoneTolerations []*ZoneTolerationsItems0 +// swagger:model poolTolerations +type PoolTolerations []*PoolTolerationsItems0 -// Validate validates this zone tolerations -func (m ZoneTolerations) Validate(formats strfmt.Registry) error { +// Validate validates this pool tolerations +func (m PoolTolerations) Validate(formats strfmt.Registry) error { var res []error for i := 0; i < len(m); i++ { @@ -61,10 +61,10 @@ func (m ZoneTolerations) Validate(formats strfmt.Registry) error { return nil } -// ZoneTolerationsItems0 The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . +// PoolTolerationsItems0 The pod this Toleration is attached to tolerates any taint that matches the triple using the matching operator . // -// swagger:model ZoneTolerationsItems0 -type ZoneTolerationsItems0 struct { +// swagger:model PoolTolerationsItems0 +type PoolTolerationsItems0 struct { // Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute. 
Effect string `json:"effect,omitempty"` @@ -76,14 +76,14 @@ type ZoneTolerationsItems0 struct { Operator string `json:"operator,omitempty"` // toleration seconds - TolerationSeconds *ZoneTolerationSeconds `json:"tolerationSeconds,omitempty"` + TolerationSeconds *PoolTolerationSeconds `json:"tolerationSeconds,omitempty"` // Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string. Value string `json:"value,omitempty"` } -// Validate validates this zone tolerations items0 -func (m *ZoneTolerationsItems0) Validate(formats strfmt.Registry) error { +// Validate validates this pool tolerations items0 +func (m *PoolTolerationsItems0) Validate(formats strfmt.Registry) error { var res []error if err := m.validateTolerationSeconds(formats); err != nil { @@ -96,7 +96,7 @@ func (m *ZoneTolerationsItems0) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneTolerationsItems0) validateTolerationSeconds(formats strfmt.Registry) error { +func (m *PoolTolerationsItems0) validateTolerationSeconds(formats strfmt.Registry) error { if swag.IsZero(m.TolerationSeconds) { // not required return nil @@ -115,7 +115,7 @@ func (m *ZoneTolerationsItems0) validateTolerationSeconds(formats strfmt.Registr } // MarshalBinary interface implementation -func (m *ZoneTolerationsItems0) MarshalBinary() ([]byte, error) { +func (m *PoolTolerationsItems0) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -123,8 +123,8 @@ func (m *ZoneTolerationsItems0) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneTolerationsItems0) UnmarshalBinary(b []byte) error { - var res ZoneTolerationsItems0 +func (m *PoolTolerationsItems0) UnmarshalBinary(b []byte) error { + var res PoolTolerationsItems0 if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/zone_update_request.go b/models/pool_update_request.go similarity index 67% rename from models/zone_update_request.go rename to models/pool_update_request.go index f92b834b3..ba0264136 100644 --- a/models/zone_update_request.go +++ b/models/pool_update_request.go @@ -31,21 +31,21 @@ import ( "github.com/go-openapi/validate" ) -// ZoneUpdateRequest zone update request +// PoolUpdateRequest pool update request // -// swagger:model zoneUpdateRequest -type ZoneUpdateRequest struct { +// swagger:model poolUpdateRequest +type PoolUpdateRequest struct { - // zones + // pools // Required: true - Zones []*Zone `json:"zones"` + Pools []*Pool `json:"pools"` } -// Validate validates this zone update request -func (m *ZoneUpdateRequest) Validate(formats strfmt.Registry) error { +// Validate validates this pool update request +func (m *PoolUpdateRequest) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateZones(formats); err != nil { + if err := m.validatePools(formats); err != nil { res = append(res, err) } @@ -55,21 +55,21 @@ func (m *ZoneUpdateRequest) Validate(formats strfmt.Registry) error { return nil } -func (m *ZoneUpdateRequest) validateZones(formats strfmt.Registry) error { +func (m *PoolUpdateRequest) validatePools(formats strfmt.Registry) error { - if err := validate.Required("zones", "body", m.Zones); err != nil { + if err := validate.Required("pools", "body", m.Pools); err != nil { return err } - for i := 0; i < len(m.Zones); i++ { - if swag.IsZero(m.Zones[i]) { // not required + for i := 0; i < len(m.Pools); i++ { + if swag.IsZero(m.Pools[i]) { // not required continue } - if m.Zones[i] 
!= nil { - if err := m.Zones[i].Validate(formats); err != nil { + if m.Pools[i] != nil { + if err := m.Pools[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("zones" + "." + strconv.Itoa(i)) + return ve.ValidateName("pools" + "." + strconv.Itoa(i)) } return err } @@ -81,7 +81,7 @@ func (m *ZoneUpdateRequest) validateZones(formats strfmt.Registry) error { } // MarshalBinary interface implementation -func (m *ZoneUpdateRequest) MarshalBinary() ([]byte, error) { +func (m *PoolUpdateRequest) MarshalBinary() ([]byte, error) { if m == nil { return nil, nil } @@ -89,8 +89,8 @@ func (m *ZoneUpdateRequest) MarshalBinary() ([]byte, error) { } // UnmarshalBinary interface implementation -func (m *ZoneUpdateRequest) UnmarshalBinary(b []byte) error { - var res ZoneUpdateRequest +func (m *PoolUpdateRequest) UnmarshalBinary(b []byte) error { + var res PoolUpdateRequest if err := swag.ReadJSON(b, &res); err != nil { return err } diff --git a/models/tenant.go b/models/tenant.go index da6cdf08b..c9b972a51 100644 --- a/models/tenant.go +++ b/models/tenant.go @@ -59,18 +59,18 @@ type Tenant struct { // namespace Namespace string `json:"namespace,omitempty"` + // pools + Pools []*Pool `json:"pools"` + // total size TotalSize int64 `json:"total_size,omitempty"` - - // zones - Zones []*Zone `json:"zones"` } // Validate validates this tenant func (m *Tenant) Validate(formats strfmt.Registry) error { var res []error - if err := m.validateZones(formats); err != nil { + if err := m.validatePools(formats); err != nil { res = append(res, err) } @@ -80,21 +80,21 @@ func (m *Tenant) Validate(formats strfmt.Registry) error { return nil } -func (m *Tenant) validateZones(formats strfmt.Registry) error { +func (m *Tenant) validatePools(formats strfmt.Registry) error { - if swag.IsZero(m.Zones) { // not required + if swag.IsZero(m.Pools) { // not required return nil } - for i := 0; i < len(m.Zones); i++ { - if swag.IsZero(m.Zones[i]) { // not required + for i := 0; i < len(m.Pools); i++ { + if swag.IsZero(m.Pools[i]) { // not required continue } - if m.Zones[i] != nil { - if err := m.Zones[i].Validate(formats); err != nil { + if m.Pools[i] != nil { + if err := m.Pools[i].Validate(formats); err != nil { if ve, ok := err.(*errors.Validation); ok { - return ve.ValidateName("zones" + "." + strconv.Itoa(i)) + return ve.ValidateName("pools" + "." + strconv.Itoa(i)) } return err } diff --git a/models/tenant_list.go b/models/tenant_list.go index 5532f874e..c043107e3 100644 --- a/models/tenant_list.go +++ b/models/tenant_list.go @@ -50,14 +50,14 @@ type TenantList struct { // namespace Namespace string `json:"namespace,omitempty"` + // pool count + PoolCount int64 `json:"pool_count,omitempty"` + // total size TotalSize int64 `json:"total_size,omitempty"` // volume count VolumeCount int64 `json:"volume_count,omitempty"` - - // zone count - ZoneCount int64 `json:"zone_count,omitempty"` } // Validate validates this tenant list diff --git a/pkg/utils/parity.go b/pkg/utils/parity.go index 528e5f3d5..c83508f5c 100644 --- a/pkg/utils/parity.go +++ b/pkg/utils/parity.go @@ -185,9 +185,9 @@ func getTotalSizes(argPatterns []ellipses.ArgPattern) []uint64 { } // PossibleParityValues returns possible parities for input args, -// parties are calculated in uniform manner for one zone or -// multiple zones, ensuring that parities returned are common -// and applicable across all zones. 
+// parties are calculated in uniform manner for one pool or +// multiple pools, ensuring that parities returned are common +// and applicable across all pools. func PossibleParityValues(args ...string) ([]string, error) { setIndexes, err := parseEndpointSet(args...) if err != nil { diff --git a/portal-ui/src/common/types.ts b/portal-ui/src/common/types.ts index bbb5c103e..80da8557a 100644 --- a/portal-ui/src/common/types.ts +++ b/portal-ui/src/common/types.ts @@ -34,8 +34,8 @@ export interface ITenant { volume_count: string; volume_size: string; volumes_per_server?: string; - zone_count: string; - zones?: IZoneModel[]; + pool_count: string; + pools?: IPoolModel[]; used_capacity?: string; endpoint?: string; storage_class?: string; @@ -58,7 +58,7 @@ export interface ITenantCreator { secret_key: string; image: string; console_image: string; - zones: IZoneModel[]; + pools: IPoolModel[]; namespace: string; erasureCodingParity: number; tls?: ITLSTenantConfiguration; @@ -299,7 +299,7 @@ export interface IArchivedTenant { capacity: number; } -export interface IZoneModel { +export interface IPoolModel { name?: string; servers: number; volumes_per_server: number; @@ -309,8 +309,8 @@ export interface IZoneModel { resources?: IResourceModel; } -export interface IUpdateZone { - zones: IZoneModel[]; +export interface IUpdatePool { + pools: IPoolModel[]; } export interface INode { diff --git a/portal-ui/src/common/utils.ts b/portal-ui/src/common/utils.ts index 528479260..30ca3f19c 100644 --- a/portal-ui/src/common/utils.ts +++ b/portal-ui/src/common/utils.ts @@ -15,7 +15,7 @@ // along with this program. If not, see . import storage from "local-storage-fallback"; -import { ICapacity, IZoneModel } from "./types"; +import { ICapacity, IPoolModel } from "./types"; const minStReq = 1073741824; // Minimal Space required for MinIO const minMemReq = 2147483648; // Minimal Memory required for MinIO in bytes @@ -204,7 +204,7 @@ export const calculateDistribution = ( if (parseInt(requestedSizeBytes, 10) < minStReq) { return { - error: "The zone size must be greater than 1Gi", + error: "The pool size must be greater than 1Gi", nodes: 0, persistentVolumes: 0, disks: 0, @@ -324,9 +324,9 @@ const structureCalc = ( }; }; -// Zone Name Generator -export const generateZoneName = (zones: IZoneModel[]) => { - const zoneCounter = zones.length; +// Pool Name Generator +export const generatePoolName = (pools: IPoolModel[]) => { + const poolCounter = pools.length; - return `zone-${zoneCounter}`; + return `pool-${poolCounter}`; }; diff --git a/portal-ui/src/screens/Console/Tenants/ListTenants/AddTenant.tsx b/portal-ui/src/screens/Console/Tenants/ListTenants/AddTenant.tsx index 865c55c67..7fad67f7b 100644 --- a/portal-ui/src/screens/Console/Tenants/ListTenants/AddTenant.tsx +++ b/portal-ui/src/screens/Console/Tenants/ListTenants/AddTenant.tsx @@ -33,7 +33,7 @@ import FormSwitchWrapper from "../../Common/FormComponents/FormSwitchWrapper/For import SelectWrapper from "../../Common/FormComponents/SelectWrapper/SelectWrapper"; import { calculateDistribution, - generateZoneName, + generatePoolName, getBytes, k8sfactorForDropdown, niceBytes, @@ -91,7 +91,7 @@ const styles = (theme: Theme) => fontWeight: 700, width: "30%", }, - zoneError: { + poolError: { color: "#dc1f2e", fontSize: "0.75rem", paddingLeft: 120, @@ -737,7 +737,7 @@ const AddTenant = ({ /* Send Information to backend */ useEffect(() => { if (addSending) { - const zoneName = generateZoneName([]); + const poolName = generatePoolName([]); const hardCodedAffinity: 
IAffinityModel = { podAntiAffinity: { @@ -751,9 +751,9 @@ const AddTenant = ({ values: [tenantName], }, { - key: "v1.min.io/zone", + key: "v1.min.io/pool", operator: "In", - values: [zoneName], + values: [poolName], }, ], }, @@ -778,9 +778,9 @@ const AddTenant = ({ service_name: "", image: imageName, console_image: consoleImage, - zones: [ + pools: [ { - name: zoneName, + name: poolName, servers: distribution.nodes, volumes_per_server: distribution.disks, volume_configuration: { diff --git a/portal-ui/src/screens/Console/Tenants/ListTenants/ListTenants.tsx b/portal-ui/src/screens/Console/Tenants/ListTenants/ListTenants.tsx index 9d842cf2c..3a6c28916 100644 --- a/portal-ui/src/screens/Console/Tenants/ListTenants/ListTenants.tsx +++ b/portal-ui/src/screens/Console/Tenants/ListTenants/ListTenants.tsx @@ -294,7 +294,7 @@ const ListTenants = ({ classes }: ITenantsList) => { columns={[ { label: "Name", elementKey: "name" }, { label: "Capacity", elementKey: "capacity" }, - { label: "# of Zones", elementKey: "zone_count" }, + { label: "# of Pools", elementKey: "pool_count" }, { label: "State", elementKey: "currentState" }, ]} isLoading={isLoading} diff --git a/portal-ui/src/screens/Console/Tenants/ListTenants/ZonesMultiSelector.tsx b/portal-ui/src/screens/Console/Tenants/ListTenants/PoolsMultiSelector.tsx similarity index 86% rename from portal-ui/src/screens/Console/Tenants/ListTenants/ZonesMultiSelector.tsx rename to portal-ui/src/screens/Console/Tenants/ListTenants/PoolsMultiSelector.tsx index 2b909a2b2..6e15455e4 100644 --- a/portal-ui/src/screens/Console/Tenants/ListTenants/ZonesMultiSelector.tsx +++ b/portal-ui/src/screens/Console/Tenants/ListTenants/PoolsMultiSelector.tsx @@ -26,16 +26,16 @@ import { tooltipHelper, } from "../../Common/FormComponents/common/styleLibrary"; import DeleteIcon from "../../../../icons/DeleteIcon"; -import { IZone } from "./types"; +import { IPool } from "./types"; -interface IZonesMultiSelector { - elements: IZone[]; +interface IPoolsMultiSelector { + elements: IPool[]; name: string; label: string; tooltip?: string; classes: any; - onChange: (elements: IZone[]) => void; + onChange: (elements: IPool[]) => void; } const gridBasic = { @@ -78,15 +78,15 @@ const styles = (theme: Theme) => }, }); -const ZonesMultiSelector = ({ +const PoolsMultiSelector = ({ elements, name, label, tooltip = "", onChange, classes, -}: IZonesMultiSelector) => { - const defaultZone: IZone = { +}: IPoolsMultiSelector) => { + const defaultPool: IPool = { name: "", servers: 0, capacity: "", @@ -95,7 +95,7 @@ const ZonesMultiSelector = ({ volume_configuration: { size: 0, storage_class: "", labels: null }, }; - const [currentElements, setCurrentElements] = useState([]); + const [currentElements, setCurrentElements] = useState([]); const [internalCounter, setInternalCounter] = useState(1); const bottomList = createRef(); @@ -108,7 +108,7 @@ const ZonesMultiSelector = ({ useEffect(() => { if (currentElements.length === 0 && elements.length === 0) { // Initial Value - setCurrentElements([{ ...defaultZone, name: "zone-1" }]); + setCurrentElements([{ ...defaultPool, name: "pool-1" }]); } else if (currentElements.length === 0 && elements.length > 0) { setCurrentElements(elements); setInternalCounter(elements.length); @@ -116,7 +116,7 @@ const ZonesMultiSelector = ({ }, [currentElements, elements]); // If the last input is not empty, we add a new one - const addEmptyRow = (elementsUp: IZone[]) => { + const addEmptyRow = (elementsUp: IPool[]) => { const lastElement = elementsUp[elementsUp.length - 1]; 
const internalElement = internalCounter + 1; if ( @@ -125,8 +125,8 @@ const ZonesMultiSelector = ({ !isNaN(lastElement.servers) ) { elementsUp.push({ - ...defaultZone, - name: `zone-${internalElement}`, + ...defaultPool, + name: `pool-${internalElement}`, }); const refScroll = bottomList.current; @@ -147,7 +147,7 @@ const ZonesMultiSelector = ({ let updatedElement = [...currentElements]; const index = get(e.target, "dataset.index", 0); - const rowPosition: IZone = updatedElement[index]; + const rowPosition: IPool = updatedElement[index]; rowPosition.servers = field === "servers" ? parseInt(e.target.value) : rowPosition.servers; @@ -159,19 +159,19 @@ const ZonesMultiSelector = ({ setCurrentElements(updatedElement); }; - const deleteElement = (zoneToRemove: number) => { - const zonesClone = [...currentElements]; + const deleteElement = (poolToRemove: number) => { + const poolsClone = [...currentElements]; - const newArray = zonesClone - .slice(0, zoneToRemove) - .concat(zonesClone.slice(zoneToRemove + 1, zonesClone.length)); + const newArray = poolsClone + .slice(0, poolToRemove) + .concat(poolsClone.slice(poolToRemove + 1, poolsClone.length)); setCurrentElements(newArray); }; const inputs = currentElements.map((element, index) => { return ( - +
onChangeElement(e, "name")} index={index} - key={`Zones-${name}-${index.toString()}-name`} + key={`Pools-${name}-${index.toString()}-name`} />
@@ -193,7 +193,7 @@ const ZonesMultiSelector = ({ value={currentElements[index].servers.toString(10)} onChange={(e) => onChangeElement(e, "servers")} index={index} - key={`Zones-${name}-${index.toString()}-servers`} + key={`Pools-${name}-${index.toString()}-servers`} />
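Aside on pool naming in this patch: the selector above seeds its rows as pool-1, pool-2, … from an internal counter, while the renamed generatePoolName helper in portal-ui/src/common/utils.ts derives the name from the current array length. A standalone TypeScript sketch of that helper's behavior (illustrative only; the parameter type is simplified here, the patch types it as IPoolModel[]):

// Illustrative re-statement of generatePoolName from common/utils.ts.
const generatePoolName = (pools: { name?: string }[]): string => `pool-${pools.length}`;

console.log(generatePoolName([]));                   // "pool-0" — what AddTenant passes for the first pool
console.log(generatePoolName([{ name: "pool-0" }])); // "pool-1"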
@@ -239,4 +239,4 @@ const ZonesMultiSelector = ({ ); }; -export default withStyles(styles)(ZonesMultiSelector); +export default withStyles(styles)(PoolsMultiSelector); diff --git a/portal-ui/src/screens/Console/Tenants/ListTenants/types.ts b/portal-ui/src/screens/Console/Tenants/ListTenants/types.ts index c26388cd8..4c31963d8 100644 --- a/portal-ui/src/screens/Console/Tenants/ListTenants/types.ts +++ b/portal-ui/src/screens/Console/Tenants/ListTenants/types.ts @@ -14,7 +14,7 @@ // You should have received a copy of the GNU Affero General Public License // along with this program. If not, see . -export interface IZone { +export interface IPool { name: string; servers: number; volumes_per_server: number; @@ -24,7 +24,7 @@ export interface IZone { volumes: number; } -export interface IAddZoneRequest { +export interface IAddPoolRequest { name: string; servers: number; volumes_per_server: number; @@ -43,14 +43,14 @@ export interface ITenant { namespace: string; image: string; console_image: string; - zone_count: number; + pool_count: number; currentState: string; instance_count: 4; creation_date: Date; volume_size: number; volume_count: number; volumes_per_server: number; - zones: IZone[]; + pools: IPool[]; // computed capacity: string; } diff --git a/portal-ui/src/screens/Console/Tenants/TenantDetails/AddZoneModal.tsx b/portal-ui/src/screens/Console/Tenants/TenantDetails/AddPoolModal.tsx similarity index 90% rename from portal-ui/src/screens/Console/Tenants/TenantDetails/AddZoneModal.tsx rename to portal-ui/src/screens/Console/Tenants/TenantDetails/AddPoolModal.tsx index 9f7011802..e57c7a55d 100644 --- a/portal-ui/src/screens/Console/Tenants/TenantDetails/AddZoneModal.tsx +++ b/portal-ui/src/screens/Console/Tenants/TenantDetails/AddPoolModal.tsx @@ -7,13 +7,13 @@ import Grid from "@material-ui/core/Grid"; import { niceBytes } from "../../../../common/utils"; import { Button, LinearProgress } from "@material-ui/core"; import api from "../../../../common/api"; -import { IAddZoneRequest, ITenant } from "../ListTenants/types"; +import { IAddPoolRequest, ITenant } from "../ListTenants/types"; -interface IAddZoneProps { +interface IAddPoolProps { tenant: ITenant; classes: any; open: boolean; - onCloseZoneAndReload: (shouldReload: boolean) => void; + onClosePoolAndReload: (shouldReload: boolean) => void; } const styles = (theme: Theme) => @@ -58,12 +58,12 @@ const styles = (theme: Theme) => ...modalBasic, }); -const AddZoneModal = ({ +const AddPoolModal = ({ tenant, classes, open, - onCloseZoneAndReload, -}: IAddZoneProps) => { + onClosePoolAndReload, +}: IAddPoolProps) => { const [addSending, setAddSending] = useState(false); const [numberOfNodes, setNumberOfNodes] = useState(0); const [volumeSize, setVolumeSize] = useState(0); @@ -74,9 +74,9 @@ const AddZoneModal = ({ return ( onCloseZoneAndReload(false)} + onClose={() => onClosePoolAndReload(false)} modalOpen={open} - title="Add Zone" + title="Add Pool" >
) => { e.preventDefault(); setAddSending(true); - const data: IAddZoneRequest = { + const data: IAddPoolRequest = { name: "", servers: numberOfNodes, volumes_per_server: volumesPerServer, @@ -97,12 +97,12 @@ const AddZoneModal = ({ api .invoke( "POST", - `/api/v1/namespaces/${tenant.namespace}/tenants/${tenant.name}/zones`, + `/api/v1/namespaces/${tenant.namespace}/tenants/${tenant.name}/pools`, data ) .then(() => { setAddSending(false); - onCloseZoneAndReload(true); + onClosePoolAndReload(true); }) .catch((err) => { setAddSending(false); @@ -124,8 +124,8 @@ const AddZoneModal = ({ ) => { setVolumeSize(parseInt(e.target.value)); @@ -184,4 +184,4 @@ const AddZoneModal = ({ ); }; -export default withStyles(styles)(AddZoneModal); +export default withStyles(styles)(AddPoolModal); diff --git a/portal-ui/src/screens/Console/Tenants/TenantDetails/ReplicationSetup.tsx b/portal-ui/src/screens/Console/Tenants/TenantDetails/ReplicationSetup.tsx index 6ee3dea53..d73d04acb 100644 --- a/portal-ui/src/screens/Console/Tenants/TenantDetails/ReplicationSetup.tsx +++ b/portal-ui/src/screens/Console/Tenants/TenantDetails/ReplicationSetup.tsx @@ -76,7 +76,7 @@ const ReplicationSetup = ({ return ( { closeModalAndRefresh(false); }} diff --git a/portal-ui/src/screens/Console/Tenants/TenantDetails/TenantDetails.tsx b/portal-ui/src/screens/Console/Tenants/TenantDetails/TenantDetails.tsx index 230c28c49..d181d6390 100644 --- a/portal-ui/src/screens/Console/Tenants/TenantDetails/TenantDetails.tsx +++ b/portal-ui/src/screens/Console/Tenants/TenantDetails/TenantDetails.tsx @@ -29,11 +29,11 @@ import { CreateIcon } from "../../../../icons"; import TableWrapper from "../../Common/TableWrapper/TableWrapper"; import Paper from "@material-ui/core/Paper"; import { niceBytes } from "../../../../common/utils"; -import AddZoneModal from "./AddZoneModal"; +import AddPoolModal from "./AddPoolModal"; import AddBucket from "../../Buckets/ListBuckets/AddBucket"; import ReplicationSetup from "./ReplicationSetup"; import api from "../../../../common/api"; -import { ITenant, IZone } from "../ListTenants/types"; +import { ITenant, IPool } from "../ListTenants/types"; import Logs from "./Logs/Logs"; import Trace from "./Trace/Trace"; import Watch from "./Watch/Watch"; @@ -107,11 +107,11 @@ const styles = (theme: Theme) => const TenantDetails = ({ classes, match }: ITenantDetailsProps) => { const [selectedTab, setSelectedTab] = useState(0); const [capacity, setCapacity] = useState(0); - const [zoneCount, setZoneCount] = useState(0); - const [serverSets, setServerSets] = useState([]); + const [poolCount, setPoolCount] = useState(0); + const [serverSets, setServerSets] = useState([]); const [instances, setInstances] = useState(0); const [volumes, setVolumes] = useState(0); - const [addZoneOpen, setAddZone] = useState(false); + const [addPoolOpen, setAddPool] = useState(false); const [addBucketOpen, setAddBucketOpen] = useState(false); const [addReplicationOpen, setAddReplicationOpen] = useState(false); const [error, setError] = useState(""); @@ -123,8 +123,8 @@ const TenantDetails = ({ classes, match }: ITenantDetailsProps) => { const tenantName = match.params["tenantName"]; const tenantNamespace = match.params["tenantNamespace"]; - const onCloseZoneAndRefresh = (reload: boolean) => { - setAddZone(false); + const onClosePoolAndRefresh = (reload: boolean) => { + setAddPool(false); if (reload) { console.log("reload"); @@ -150,29 +150,29 @@ const TenantDetails = ({ classes, match }: ITenantDetailsProps) => { 
`/api/v1/namespaces/${tenantNamespace}/tenants/${tenantName}` ) .then((res: ITenant) => { - const resZones = !res.zones ? [] : res.zones; + const resPools = !res.pools ? [] : res.pools; let totalInstances = 0; let totalVolumes = 0; let count = 1; - for (let zone of resZones) { + for (let pool of resPools) { const cap = - zone.volumes_per_server * - zone.servers * - zone.volume_configuration.size; - zone.name = `zone-${count}`; - zone.capacity = niceBytes(cap + ""); - zone.volumes = zone.servers * zone.volumes_per_server; - totalInstances += zone.servers; - totalVolumes += zone.volumes; + pool.volumes_per_server * + pool.servers * + pool.volume_configuration.size; + pool.name = `pool-${count}`; + pool.capacity = niceBytes(cap + ""); + pool.volumes = pool.servers * pool.volumes_per_server; + totalInstances += pool.servers; + totalVolumes += pool.volumes; count += 1; } setCapacity(res.total_size); - setZoneCount(resZones.length); + setPoolCount(resPools.length); setVolumes(totalVolumes); setInstances(totalInstances); - setServerSets(resZones); + setServerSets(resPools); setTenant(res); setError(""); @@ -209,10 +209,10 @@ const TenantDetails = ({ classes, match }: ITenantDetailsProps) => { return ( - {addZoneOpen && tenant !== null && ( - )} @@ -247,7 +247,7 @@ const TenantDetails = ({ classes, match }: ITenantDetailsProps) => {
             Minio:
             {tenant ? tenant.image : ""}
             Clusters:
-            {zoneCount}
+            {poolCount}
             Console:
             {tenant ? tenant.console_image : ""}
             Instances:
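For reference, the per-pool volume and capacity figures surfaced in this view are computed a few hunks above as servers × volumes per server × volume size. A small standalone TypeScript sketch of that arithmetic, with made-up numbers (illustrative only, not part of this patch):

// Mirrors the per-pool math in TenantDetails.tsx: volumes = servers * volumesPerServer,
// raw capacity = volumes * volumeSize. Example: 4 servers, 4 volumes each, 256 GiB volumes.
const servers = 4;
const volumesPerServer = 4;
const volumeSizeBytes = 256 * 1024 ** 3;                 // 256 GiB per volume
const poolVolumes = servers * volumesPerServer;          // 16 volumes in the pool
const poolCapacityBytes = poolVolumes * volumeSizeBytes; // 16 * 256 GiB = 4 TiB raw
console.log({ poolVolumes, poolCapacityBytes });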
@@ -293,7 +293,7 @@ const TenantDetails = ({ classes, match }: ITenantDetailsProps) => { color="primary" startIcon={} onClick={() => { - setAddZone(true); + setAddPool(true); }} > Expand Tenant diff --git a/restapi/admin_tenants.go b/restapi/admin_tenants.go index c8f4c6a1c..acc334a4b 100644 --- a/restapi/admin_tenants.go +++ b/restapi/admin_tenants.go @@ -115,13 +115,13 @@ func registerTenantHandlers(api *operations.ConsoleAPI) { return admin_api.NewUpdateTenantCreated() }) - // Add Tenant Zones - api.AdminAPITenantAddZoneHandler = admin_api.TenantAddZoneHandlerFunc(func(params admin_api.TenantAddZoneParams, session *models.Principal) middleware.Responder { - err := getTenantAddZoneResponse(session, params) + // Add Tenant Pools + api.AdminAPITenantAddPoolHandler = admin_api.TenantAddPoolHandlerFunc(func(params admin_api.TenantAddPoolParams, session *models.Principal) middleware.Responder { + err := getTenantAddPoolResponse(session, params) if err != nil { - return admin_api.NewTenantAddZoneDefault(int(err.Code)).WithPayload(err) + return admin_api.NewTenantAddPoolDefault(int(err.Code)).WithPayload(err) } - return admin_api.NewTenantAddZoneCreated() + return admin_api.NewTenantAddPoolCreated() }) // Get Tenant Usage @@ -133,13 +133,13 @@ func registerTenantHandlers(api *operations.ConsoleAPI) { return admin_api.NewGetTenantUsageOK().WithPayload(payload) }) - // Update Tenant Zones - api.AdminAPITenantUpdateZonesHandler = admin_api.TenantUpdateZonesHandlerFunc(func(params admin_api.TenantUpdateZonesParams, session *models.Principal) middleware.Responder { - resp, err := getTenantUpdateZoneResponse(session, params) + // Update Tenant Pools + api.AdminAPITenantUpdatePoolsHandler = admin_api.TenantUpdatePoolsHandlerFunc(func(params admin_api.TenantUpdatePoolsParams, session *models.Principal) middleware.Responder { + resp, err := getTenantUpdatePoolResponse(session, params) if err != nil { - return admin_api.NewTenantUpdateZonesDefault(int(err.Code)).WithPayload(err) + return admin_api.NewTenantUpdatePoolsDefault(int(err.Code)).WithPayload(err) } - return admin_api.NewTenantUpdateZonesOK().WithPayload(resp) + return admin_api.NewTenantUpdatePoolsOK().WithPayload(resp) }) // Update Tenant Certificates @@ -300,13 +300,13 @@ func isPrometheusEnabled(annotations map[string]string) bool { } func getTenantInfo(tenant *operator.Tenant) *models.Tenant { - var zones []*models.Zone + var pools []*models.Pool consoleImage := "" var totalSize int64 - for _, z := range tenant.Spec.Zones { - zones = append(zones, parseTenantZone(&z)) - zoneSize := int64(z.Servers) * int64(z.VolumesPerServer) * z.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value() - totalSize = totalSize + zoneSize + for _, p := range tenant.Spec.Pools { + pools = append(pools, parseTenantPool(&p)) + poolSize := int64(p.Servers) * int64(p.VolumesPerServer) * p.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value() + totalSize = totalSize + poolSize } var deletion string if tenant.ObjectMeta.DeletionTimestamp != nil { @@ -323,7 +323,7 @@ func getTenantInfo(tenant *operator.Tenant) *models.Tenant { Name: tenant.Name, TotalSize: totalSize, CurrentState: tenant.Status.CurrentState, - Zones: zones, + Pools: pools, Namespace: tenant.ObjectMeta.Namespace, Image: tenant.Spec.Image, ConsoleImage: consoleImage, @@ -374,12 +374,12 @@ func listTenants(ctx context.Context, operatorClient OperatorClientI, namespace var totalSize int64 var instanceCount int64 var volumeCount int64 - for _, zone := range tenant.Spec.Zones { - instanceCount 
= instanceCount + int64(zone.Servers) - volumeCount = volumeCount + int64(zone.Servers*zone.VolumesPerServer) - if zone.VolumeClaimTemplate != nil { - zoneSize := int64(zone.VolumesPerServer) * int64(zone.Servers) * zone.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value() - totalSize = totalSize + zoneSize + for _, pool := range tenant.Spec.Pools { + instanceCount = instanceCount + int64(pool.Servers) + volumeCount = volumeCount + int64(pool.Servers*pool.VolumesPerServer) + if pool.VolumeClaimTemplate != nil { + poolSize := int64(pool.VolumesPerServer) * int64(pool.Servers) * pool.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value() + totalSize = totalSize + poolSize } } @@ -392,7 +392,7 @@ func listTenants(ctx context.Context, operatorClient OperatorClientI, namespace CreationDate: tenant.ObjectMeta.CreationTimestamp.String(), DeletionDate: deletion, Name: tenant.ObjectMeta.Name, - ZoneCount: int64(len(tenant.Spec.Zones)), + PoolCount: int64(len(tenant.Spec.Pools)), InstanceCount: instanceCount, VolumeCount: volumeCount, CurrentState: tenant.Status.CurrentState, @@ -737,13 +737,14 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create annotations = tenantReq.Annotations minInst.Annotations = annotations } - // set the zones if they are provided - for _, zone := range tenantReq.Zones { - zone, err := parseTenantZoneRequest(zone) + // set the pools if they are provided + for _, pool := range tenantReq.Pools { + pool, err := parseTenantPoolRequest(pool) if err != nil { + log.Println("parseTenantPoolRequest", err) return nil, prepareError(err) } - minInst.Spec.Zones = append(minInst.Spec.Zones, *zone) + minInst.Spec.Pools = append(minInst.Spec.Pools, *pool) } // Set Mount Path if provided @@ -785,6 +786,7 @@ func getTenantCreatedResponse(session *models.Principal, params admin_api.Create _, err = opClient.MinioV1().Tenants(ns).Create(context.Background(), &minInst, metav1.CreateOptions{}) if err != nil { + log.Println("Create", err) return nil, prepareError(err) } @@ -911,21 +913,21 @@ func updateTenantAction(ctx context.Context, operatorClient OperatorClientI, cli if params.Body.EnablePrometheus && currentAnnotations != nil { // add prometheus annotations to the tenant minInst.Annotations = addAnnotations(currentAnnotations, prometheusAnnotations) - // add prometheus annotations to the each zone - if minInst.Spec.Zones != nil { - for _, zone := range minInst.Spec.Zones { - zoneAnnotations := zone.VolumeClaimTemplate.GetObjectMeta().GetAnnotations() - zone.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(addAnnotations(zoneAnnotations, prometheusAnnotations)) + // add prometheus annotations to the each pool + if minInst.Spec.Pools != nil { + for _, pool := range minInst.Spec.Pools { + poolAnnotations := pool.VolumeClaimTemplate.GetObjectMeta().GetAnnotations() + pool.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(addAnnotations(poolAnnotations, prometheusAnnotations)) } } } else { // remove prometheus annotations to the tenant minInst.Annotations = removeAnnotations(currentAnnotations, prometheusAnnotations) - // add prometheus annotations from each zone - if minInst.Spec.Zones != nil { - for _, zone := range minInst.Spec.Zones { - zoneAnnotations := zone.VolumeClaimTemplate.GetObjectMeta().GetAnnotations() - zone.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(removeAnnotations(zoneAnnotations, prometheusAnnotations)) + // add prometheus annotations from each pool + if minInst.Spec.Pools != nil { + for _, pool := range 
minInst.Spec.Pools { + poolAnnotations := pool.VolumeClaimTemplate.GetObjectMeta().GetAnnotations() + pool.VolumeClaimTemplate.GetObjectMeta().SetAnnotations(removeAnnotations(poolAnnotations, prometheusAnnotations)) } } } @@ -988,19 +990,19 @@ func getUpdateTenantResponse(session *models.Principal, params admin_api.UpdateT return nil } -// addTenantZone creates a zone to a defined tenant -func addTenantZone(ctx context.Context, operatorClient OperatorClientI, params admin_api.TenantAddZoneParams) error { +// addTenantPool creates a pool to a defined tenant +func addTenantPool(ctx context.Context, operatorClient OperatorClientI, params admin_api.TenantAddPoolParams) error { tenant, err := operatorClient.TenantGet(ctx, params.Namespace, params.Tenant, metav1.GetOptions{}) if err != nil { return err } - zoneParams := params.Body - zone, err := parseTenantZoneRequest(zoneParams) + poolParams := params.Body + pool, err := parseTenantPoolRequest(poolParams) if err != nil { return err } - tenant.Spec.Zones = append(tenant.Spec.Zones, *zone) + tenant.Spec.Pools = append(tenant.Spec.Pools, *pool) payloadBytes, err := json.Marshal(tenant) if err != nil { return err @@ -1013,7 +1015,7 @@ func addTenantZone(ctx context.Context, operatorClient OperatorClientI, params a return nil } -func getTenantAddZoneResponse(session *models.Principal, params admin_api.TenantAddZoneParams) *models.Error { +func getTenantAddPoolResponse(session *models.Principal, params admin_api.TenantAddPoolParams) *models.Error { ctx := context.Background() opClientClientSet, err := cluster.OperatorClient(session.SessionToken) if err != nil { @@ -1022,8 +1024,8 @@ func getTenantAddZoneResponse(session *models.Principal, params admin_api.Tenant opClient := &operatorClient{ client: opClientClientSet, } - if err := addTenantZone(ctx, opClient, params); err != nil { - return prepareError(err, errors.New("unable to add zone")) + if err := addTenantPool(ctx, opClient, params); err != nil { + return prepareError(err, errors.New("unable to add pool")) } return nil } @@ -1080,26 +1082,26 @@ func getTenantUsageResponse(session *models.Principal, params admin_api.GetTenan return info, nil } -// parseTenantZoneRequest parse zone request and returns the equivalent -// operator.Zone object -func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { - if zoneParams.VolumeConfiguration == nil { +// parseTenantPoolRequest parse pool request and returns the equivalent +// operator.Pool object +func parseTenantPoolRequest(poolParams *models.Pool) (*operator.Pool, error) { + if poolParams.VolumeConfiguration == nil { return nil, errors.New("a volume configuration must be specified") } - if zoneParams.VolumeConfiguration.Size == nil || *zoneParams.VolumeConfiguration.Size <= int64(0) { + if poolParams.VolumeConfiguration.Size == nil || *poolParams.VolumeConfiguration.Size <= int64(0) { return nil, errors.New("volume size must be greater than 0") } - if zoneParams.Servers == nil || *zoneParams.Servers <= 0 { + if poolParams.Servers == nil || *poolParams.Servers <= 0 { return nil, errors.New("number of servers must be greater than 0") } - if zoneParams.VolumesPerServer == nil || *zoneParams.VolumesPerServer <= 0 { + if poolParams.VolumesPerServer == nil || *poolParams.VolumesPerServer <= 0 { return nil, errors.New("number of volumes per server must be greater than 0") } - volumeSize := resource.NewQuantity(*zoneParams.VolumeConfiguration.Size, resource.DecimalExponent) + volumeSize := 
resource.NewQuantity(*poolParams.VolumeConfiguration.Size, resource.DecimalExponent) volTemp := corev1.PersistentVolumeClaimSpec{ AccessModes: []corev1.PersistentVolumeAccessMode{ corev1.ReadWriteOnce, @@ -1110,18 +1112,18 @@ func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { }, }, } - if zoneParams.VolumeConfiguration.StorageClassName != "" { - volTemp.StorageClassName = &zoneParams.VolumeConfiguration.StorageClassName + if poolParams.VolumeConfiguration.StorageClassName != "" { + volTemp.StorageClassName = &poolParams.VolumeConfiguration.StorageClassName } // parse resources' requests resourcesRequests := make(corev1.ResourceList) resourcesLimits := make(corev1.ResourceList) - if zoneParams.Resources != nil { - for key, val := range zoneParams.Resources.Requests { + if poolParams.Resources != nil { + for key, val := range poolParams.Resources.Requests { resourcesRequests[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI) } - for key, val := range zoneParams.Resources.Limits { + for key, val := range poolParams.Resources.Limits { resourcesLimits[corev1.ResourceName(key)] = *resource.NewQuantity(val, resource.BinarySI) } } @@ -1129,14 +1131,14 @@ func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { // parse Node Affinity nodeSelectorTerms := []corev1.NodeSelectorTerm{} preferredSchedulingTerm := []corev1.PreferredSchedulingTerm{} - if zoneParams.Affinity != nil && zoneParams.Affinity.NodeAffinity != nil { - if zoneParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - for _, elem := range zoneParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + if poolParams.Affinity != nil && poolParams.Affinity.NodeAffinity != nil { + if poolParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + for _, elem := range poolParams.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { term := parseModelsNodeSelectorTerm(elem) nodeSelectorTerms = append(nodeSelectorTerms, term) } } - for _, elem := range zoneParams.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + for _, elem := range poolParams.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution { pst := corev1.PreferredSchedulingTerm{ Weight: *elem.Weight, Preference: parseModelsNodeSelectorTerm(elem.Preference), @@ -1157,11 +1159,11 @@ func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { // parse Pod Affinity podAffinityTerms := []corev1.PodAffinityTerm{} weightedPodAffinityTerms := []corev1.WeightedPodAffinityTerm{} - if zoneParams.Affinity != nil && zoneParams.Affinity.PodAffinity != nil { - for _, elem := range zoneParams.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution { + if poolParams.Affinity != nil && poolParams.Affinity.PodAffinity != nil { + for _, elem := range poolParams.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution { podAffinityTerms = append(podAffinityTerms, parseModelPodAffinityTerm(elem)) } - for _, elem := range zoneParams.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + for _, elem := range poolParams.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { wAffinityTerm := corev1.WeightedPodAffinityTerm{ Weight: *elem.Weight, PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm), @@ -1180,11 +1182,11 @@ func parseTenantZoneRequest(zoneParams *models.Zone) 
(*operator.Zone, error) { // parse Pod Anti Affinity podAntiAffinityTerms := []corev1.PodAffinityTerm{} weightedPodAntiAffinityTerms := []corev1.WeightedPodAffinityTerm{} - if zoneParams.Affinity != nil && zoneParams.Affinity.PodAntiAffinity != nil { - for _, elem := range zoneParams.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution { + if poolParams.Affinity != nil && poolParams.Affinity.PodAntiAffinity != nil { + for _, elem := range poolParams.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution { podAntiAffinityTerms = append(podAntiAffinityTerms, parseModelPodAffinityTerm(elem)) } - for _, elem := range zoneParams.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + for _, elem := range poolParams.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { wAffinityTerm := corev1.WeightedPodAffinityTerm{ Weight: *elem.Weight, PodAffinityTerm: parseModelPodAffinityTerm(elem.PodAffinityTerm), @@ -1211,7 +1213,7 @@ func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { // parse tolerations tolerations := []corev1.Toleration{} - for _, elem := range zoneParams.Tolerations { + for _, elem := range poolParams.Tolerations { var tolerationSeconds *int64 if elem.TolerationSeconds != nil { // elem.TolerationSeconds.Seconds is allowed to be nil @@ -1232,26 +1234,26 @@ func parseTenantZoneRequest(zoneParams *models.Zone) (*operator.Zone, error) { vct := &corev1.PersistentVolumeClaim{ ObjectMeta: metav1.ObjectMeta{ Name: "data", - Labels: zoneParams.VolumeConfiguration.Labels, - Annotations: zoneParams.VolumeConfiguration.Annotations, + Labels: poolParams.VolumeConfiguration.Labels, + Annotations: poolParams.VolumeConfiguration.Annotations, }, Spec: volTemp, } - zone := &operator.Zone{ - Name: zoneParams.Name, - Servers: int32(*zoneParams.Servers), - VolumesPerServer: *zoneParams.VolumesPerServer, + pool := &operator.Pool{ + Name: poolParams.Name, + Servers: int32(*poolParams.Servers), + VolumesPerServer: *poolParams.VolumesPerServer, VolumeClaimTemplate: vct, Resources: corev1.ResourceRequirements{ Requests: resourcesRequests, Limits: resourcesLimits, }, - NodeSelector: zoneParams.NodeSelector, + NodeSelector: poolParams.NodeSelector, Affinity: affinity, Tolerations: tolerations, } - return zone, nil + return pool, nil } func parseModelPodAffinityTerm(term *models.PodAffinityTerm) corev1.PodAffinityTerm { @@ -1297,30 +1299,30 @@ func parseModelsNodeSelectorTerm(elem *models.NodeSelectorTerm) corev1.NodeSelec return term } -// parseTenantZone operator Zone object and returns the equivalent -// models.Zone object -func parseTenantZone(zone *operator.Zone) *models.Zone { +// parseTenantPool operator pool object and returns the equivalent +// models.Pool object +func parseTenantPool(pool *operator.Pool) *models.Pool { var size *int64 var storageClassName string - if zone.VolumeClaimTemplate != nil { - size = swag.Int64(zone.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()) - if zone.VolumeClaimTemplate.Spec.StorageClassName != nil { - storageClassName = *zone.VolumeClaimTemplate.Spec.StorageClassName + if pool.VolumeClaimTemplate != nil { + size = swag.Int64(pool.VolumeClaimTemplate.Spec.Resources.Requests.Storage().Value()) + if pool.VolumeClaimTemplate.Spec.StorageClassName != nil { + storageClassName = *pool.VolumeClaimTemplate.Spec.StorageClassName } } // parse resources' requests - var resources *models.ZoneResources + var resources *models.PoolResources resourcesRequests := 
make(map[string]int64) resourcesLimits := make(map[string]int64) - for key, val := range zone.Resources.Requests { + for key, val := range pool.Resources.Requests { resourcesRequests[key.String()] = val.Value() } - for key, val := range zone.Resources.Limits { + for key, val := range pool.Resources.Limits { resourcesLimits[key.String()] = val.Value() } if len(resourcesRequests) > 0 || len(resourcesLimits) > 0 { - resources = &models.ZoneResources{ + resources = &models.PoolResources{ Limits: resourcesLimits, Requests: resourcesRequests, } @@ -1328,17 +1330,17 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { // parse Node Affinity nodeSelectorTerms := []*models.NodeSelectorTerm{} - preferredSchedulingTerm := []*models.ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} + preferredSchedulingTerm := []*models.PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} - if zone.Affinity != nil && zone.Affinity.NodeAffinity != nil { - if zone.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { - for _, elem := range zone.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { + if pool.Affinity != nil && pool.Affinity.NodeAffinity != nil { + if pool.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution != nil { + for _, elem := range pool.Affinity.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { term := parseNodeSelectorTerm(&elem) nodeSelectorTerms = append(nodeSelectorTerms, term) } } - for _, elem := range zone.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution { - pst := &models.ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ + for _, elem := range pool.Affinity.NodeAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + pst := &models.PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ Weight: swag.Int32(elem.Weight), Preference: parseNodeSelectorTerm(&elem.Preference), } @@ -1346,10 +1348,10 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { } } - var nodeAffinity *models.ZoneAffinityNodeAffinity + var nodeAffinity *models.PoolAffinityNodeAffinity if len(nodeSelectorTerms) > 0 || len(preferredSchedulingTerm) > 0 { - nodeAffinity = &models.ZoneAffinityNodeAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: &models.ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution{ + nodeAffinity = &models.PoolAffinityNodeAffinity{ + RequiredDuringSchedulingIgnoredDuringExecution: &models.PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution{ NodeSelectorTerms: nodeSelectorTerms, }, PreferredDuringSchedulingIgnoredDuringExecution: preferredSchedulingTerm, @@ -1358,23 +1360,23 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { // parse Pod Affinity podAffinityTerms := []*models.PodAffinityTerm{} - weightedPodAffinityTerms := []*models.ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} + weightedPodAffinityTerms := []*models.PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} - if zone.Affinity != nil && zone.Affinity.PodAffinity != nil { - for _, elem := range zone.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution { + if pool.Affinity != nil && pool.Affinity.PodAffinity != nil { + for _, elem := range pool.Affinity.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution { podAffinityTerms = append(podAffinityTerms, 
parsePodAffinityTerm(&elem)) } - for _, elem := range zone.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { - wAffinityTerm := &models.ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ + for _, elem := range pool.Affinity.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + wAffinityTerm := &models.PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ Weight: swag.Int32(elem.Weight), PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm), } weightedPodAffinityTerms = append(weightedPodAffinityTerms, wAffinityTerm) } } - var podAffinity *models.ZoneAffinityPodAffinity + var podAffinity *models.PoolAffinityPodAffinity if len(podAffinityTerms) > 0 || len(weightedPodAffinityTerms) > 0 { - podAffinity = &models.ZoneAffinityPodAffinity{ + podAffinity = &models.PoolAffinityPodAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: podAffinityTerms, PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAffinityTerms, } @@ -1382,14 +1384,14 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { // parse Pod Anti Affinity podAntiAffinityTerms := []*models.PodAffinityTerm{} - weightedPodAntiAffinityTerms := []*models.ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} + weightedPodAntiAffinityTerms := []*models.PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{} - if zone.Affinity != nil && zone.Affinity.PodAntiAffinity != nil { - for _, elem := range zone.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution { + if pool.Affinity != nil && pool.Affinity.PodAntiAffinity != nil { + for _, elem := range pool.Affinity.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution { podAntiAffinityTerms = append(podAntiAffinityTerms, parsePodAffinityTerm(&elem)) } - for _, elem := range zone.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { - wAffinityTerm := &models.ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ + for _, elem := range pool.Affinity.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution { + wAffinityTerm := &models.PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0{ Weight: swag.Int32(elem.Weight), PodAffinityTerm: parsePodAffinityTerm(&elem.PodAffinityTerm), } @@ -1397,18 +1399,18 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { } } - var podAntiAffinity *models.ZoneAffinityPodAntiAffinity + var podAntiAffinity *models.PoolAffinityPodAntiAffinity if len(podAntiAffinityTerms) > 0 || len(weightedPodAntiAffinityTerms) > 0 { - podAntiAffinity = &models.ZoneAffinityPodAntiAffinity{ + podAntiAffinity = &models.PoolAffinityPodAntiAffinity{ RequiredDuringSchedulingIgnoredDuringExecution: podAntiAffinityTerms, PreferredDuringSchedulingIgnoredDuringExecution: weightedPodAntiAffinityTerms, } } // build affinity object - var affinity *models.ZoneAffinity + var affinity *models.PoolAffinity if nodeAffinity != nil || podAffinity != nil || podAntiAffinity != nil { - affinity = &models.ZoneAffinity{ + affinity = &models.PoolAffinity{ NodeAffinity: nodeAffinity, PodAffinity: podAffinity, PodAntiAffinity: podAntiAffinity, @@ -1416,15 +1418,15 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { } // parse tolerations - var tolerations models.ZoneTolerations - for _, elem := range zone.Tolerations { - var tolerationSecs *models.ZoneTolerationSeconds + var tolerations models.PoolTolerations + for _, elem := 
range pool.Tolerations { + var tolerationSecs *models.PoolTolerationSeconds if elem.TolerationSeconds != nil { - tolerationSecs = &models.ZoneTolerationSeconds{ + tolerationSecs = &models.PoolTolerationSeconds{ Seconds: elem.TolerationSeconds, } } - toleration := &models.ZoneTolerationsItems0{ + toleration := &models.PoolTolerationsItems0{ Key: elem.Key, Operator: string(elem.Operator), Value: elem.Value, @@ -1434,20 +1436,20 @@ func parseTenantZone(zone *operator.Zone) *models.Zone { tolerations = append(tolerations, toleration) } - zoneModel := &models.Zone{ - Name: zone.Name, - Servers: swag.Int64(int64(zone.Servers)), - VolumesPerServer: swag.Int32(zone.VolumesPerServer), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + poolModel := &models.Pool{ + Name: pool.Name, + Servers: swag.Int64(int64(pool.Servers)), + VolumesPerServer: swag.Int32(pool.VolumesPerServer), + VolumeConfiguration: &models.PoolVolumeConfiguration{ Size: size, StorageClassName: storageClassName, }, - NodeSelector: zone.NodeSelector, + NodeSelector: pool.NodeSelector, Resources: resources, Affinity: affinity, Tolerations: tolerations, } - return zoneModel + return poolModel } func parsePodAffinityTerm(term *corev1.PodAffinityTerm) *models.PodAffinityTerm { @@ -1493,7 +1495,7 @@ func parseNodeSelectorTerm(term *corev1.NodeSelectorTerm) *models.NodeSelectorTe return &t } -func getTenantUpdateZoneResponse(session *models.Principal, params admin_api.TenantUpdateZonesParams) (*models.Tenant, *models.Error) { +func getTenantUpdatePoolResponse(session *models.Principal, params admin_api.TenantUpdatePoolsParams) (*models.Tenant, *models.Error) { ctx := context.Background() opClientClientSet, err := cluster.OperatorClient(session.SessionToken) if err != nil { @@ -1504,9 +1506,9 @@ func getTenantUpdateZoneResponse(session *models.Principal, params admin_api.Ten client: opClientClientSet, } - t, err := updateTenantZones(ctx, opClient, params.Namespace, params.Tenant, params.Body.Zones) + t, err := updateTenantPools(ctx, opClient, params.Namespace, params.Tenant, params.Body.Pools) if err != nil { - log.Println("error updating Tenant's zones:", err) + log.Println("error updating Tenant's pools:", err) return nil, prepareError(err) } @@ -1515,33 +1517,33 @@ func getTenantUpdateZoneResponse(session *models.Principal, params admin_api.Ten return tenant, nil } -// updateTenantZones Sets the Tenant's zones to the ones provided by the request +// updateTenantPools Sets the Tenant's pools to the ones provided by the request // -// It does the equivalent to a PUT request on Tenant's zones -func updateTenantZones( +// It does the equivalent to a PUT request on Tenant's pools +func updateTenantPools( ctx context.Context, operatorClient OperatorClientI, namespace string, tenantName string, - zonesReq []*models.Zone) (*operator.Tenant, error) { + poolsReq []*models.Pool) (*operator.Tenant, error) { minInst, err := operatorClient.TenantGet(ctx, namespace, tenantName, metav1.GetOptions{}) if err != nil { return nil, err } - // set the zones if they are provided - var newZoneArray []operator.Zone - for _, zone := range zonesReq { - zone, err := parseTenantZoneRequest(zone) + // set the pools if they are provided + var newPoolArray []operator.Pool + for _, pool := range poolsReq { + pool, err := parseTenantPoolRequest(pool) if err != nil { return nil, err } - newZoneArray = append(newZoneArray, *zone) + newPoolArray = append(newPoolArray, *pool) } - // replace zones array - minInst.Spec.Zones = newZoneArray + // replace pools array + 
minInst.Spec.Pools = newPoolArray minInst = minInst.DeepCopy() minInst.EnsureDefaults() diff --git a/restapi/admin_tenants_test.go b/restapi/admin_tenants_test.go index ced9057e9..344e1cf0d 100644 --- a/restapi/admin_tenants_test.go +++ b/restapi/admin_tenants_test.go @@ -287,9 +287,9 @@ func Test_TenantInfo(t *testing.T) { Namespace: "minio-ns", }, Spec: operator.TenantSpec{ - Zones: []operator.Zone{ + Pools: []operator.Pool{ { - Name: "zone1", + Name: "pool1", Servers: int32(2), VolumesPerServer: 4, VolumeClaimTemplate: &corev1.PersistentVolumeClaim{ @@ -317,12 +317,12 @@ func Test_TenantInfo(t *testing.T) { Name: "tenant1", TotalSize: int64(8388608), CurrentState: "ready", - Zones: []*models.Zone{ + Pools: []*models.Pool{ { - Name: "zone1", + Name: "pool1", Servers: swag.Int64(int64(2)), VolumesPerServer: swag.Int32(4), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ StorageClassName: "standard", Size: swag.Int64(1024 * 1024), }, @@ -352,9 +352,9 @@ func Test_TenantInfo(t *testing.T) { }, }, Spec: operator.TenantSpec{ - Zones: []operator.Zone{ + Pools: []operator.Pool{ { - Name: "zone1", + Name: "pool1", Servers: int32(2), VolumesPerServer: 4, VolumeClaimTemplate: &corev1.PersistentVolumeClaim{ @@ -382,12 +382,12 @@ func Test_TenantInfo(t *testing.T) { Name: "tenant1", TotalSize: int64(8388608), CurrentState: "ready", - Zones: []*models.Zone{ + Pools: []*models.Pool{ { - Name: "zone1", + Name: "pool1", Servers: swag.Int64(int64(2)), VolumesPerServer: swag.Int32(4), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ StorageClassName: "standard", Size: swag.Int64(1024 * 1024), }, @@ -414,7 +414,7 @@ func Test_TenantInfo(t *testing.T) { }, }, Spec: operator.TenantSpec{ - Zones: []operator.Zone{}, + Pools: []operator.Pool{}, Image: "minio/minio:RELEASE.2020-06-14T18-32-17Z", }, Status: operator.TenantStatus{ @@ -446,7 +446,7 @@ func Test_TenantInfo(t *testing.T) { }, }, Spec: operator.TenantSpec{ - Zones: []operator.Zone{}, + Pools: []operator.Pool{}, Image: "minio/minio:RELEASE.2020-06-14T18-32-17Z", Console: &operator.ConsoleConfiguration{ Image: "minio/console:master", @@ -540,7 +540,7 @@ func Test_deleteTenantAction(t *testing.T) { Namespace: "minio-tenant", Labels: map[string]string{ operator.TenantLabel: "tenant1", - operator.ZoneLabel: "zone-1", + operator.PoolLabel: "pool-1", }, }, }, @@ -567,7 +567,7 @@ func Test_deleteTenantAction(t *testing.T) { Namespace: "minio-tenant", Labels: map[string]string{ operator.TenantLabel: "tenant1", - operator.ZoneLabel: "zone-1", + operator.PoolLabel: "pool-1", }, }, }, @@ -594,7 +594,7 @@ func Test_deleteTenantAction(t *testing.T) { Namespace: "minio-tenant", Labels: map[string]string{ operator.TenantLabel: "tenant1", - operator.ZoneLabel: "zone-1", + operator.PoolLabel: "pool-1", }, }, }, @@ -621,7 +621,7 @@ func Test_deleteTenantAction(t *testing.T) { Namespace: "minio-tenant", Labels: map[string]string{ operator.TenantLabel: "tenant1", - operator.ZoneLabel: "zone-1", + operator.PoolLabel: "pool-1", }, }, }, @@ -649,7 +649,7 @@ func Test_deleteTenantAction(t *testing.T) { Namespace: "minio-tenant", Labels: map[string]string{ operator.TenantLabel: "tenant1", - operator.ZoneLabel: "zone-1", + operator.PoolLabel: "pool-1", }, }, }, @@ -672,7 +672,7 @@ func Test_deleteTenantAction(t *testing.T) { } } -func Test_TenantAddZone(t *testing.T) { +func Test_TenantAddPool(t *testing.T) { opClient := opClientMock{} type args struct { 
@@ -681,7 +681,7 @@ func Test_TenantAddZone(t *testing.T) { nameSpace string mockTenantPatch func(ctx context.Context, namespace string, tenantName string, pt types.PatchType, data []byte, options metav1.PatchOptions) (*v1.Tenant, error) mockTenantGet func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) - params admin_api.TenantAddZoneParams + params admin_api.TenantAddPoolParams } tests := []struct { name string @@ -689,7 +689,7 @@ func Test_TenantAddZone(t *testing.T) { wantErr bool }{ { - name: "Add zone, no errors", + name: "Add pool, no errors", args: args{ ctx: context.Background(), operatorClient: opClient, @@ -700,11 +700,11 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return &v1.Tenant{}, nil }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(4)), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ Size: swag.Int64(2147483648), StorageClassName: "standard", }, @@ -714,7 +714,7 @@ func Test_TenantAddZone(t *testing.T) { }, wantErr: false, }, { - name: "Add zone, error size", + name: "Add pool, error size", args: args{ ctx: context.Background(), operatorClient: opClient, @@ -725,11 +725,11 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return &v1.Tenant{}, nil }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(4)), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ Size: swag.Int64(0), StorageClassName: "standard", }, @@ -740,7 +740,7 @@ func Test_TenantAddZone(t *testing.T) { wantErr: true, }, { - name: "Add zone, error servers negative", + name: "Add pool, error servers negative", args: args{ ctx: context.Background(), operatorClient: opClient, @@ -751,11 +751,11 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return &v1.Tenant{}, nil }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(-1)), - VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ Size: swag.Int64(2147483648), StorageClassName: "standard", }, @@ -766,7 +766,7 @@ func Test_TenantAddZone(t *testing.T) { wantErr: true, }, { - name: "Add zone, error volumes per server negative", + name: "Add pool, error volumes per server negative", args: args{ ctx: context.Background(), operatorClient: opClient, @@ -777,11 +777,11 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return &v1.Tenant{}, nil }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(4)), - 
VolumeConfiguration: &models.ZoneVolumeConfiguration{ + VolumeConfiguration: &models.PoolVolumeConfiguration{ Size: swag.Int64(2147483648), StorageClassName: "standard", }, @@ -803,9 +803,9 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return &v1.Tenant{}, nil }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(4)), }, }, @@ -824,9 +824,9 @@ func Test_TenantAddZone(t *testing.T) { mockTenantGet: func(ctx context.Context, namespace string, tenantName string, options metav1.GetOptions) (*v1.Tenant, error) { return nil, errors.New("errors") }, - params: admin_api.TenantAddZoneParams{ - Body: &models.Zone{ - Name: "zone-1", + params: admin_api.TenantAddPoolParams{ + Body: &models.Pool{ + Name: "pool-1", Servers: swag.Int64(int64(4)), }, }, @@ -838,8 +838,8 @@ func Test_TenantAddZone(t *testing.T) { opClientTenantGetMock = tt.args.mockTenantGet opClientTenantPatchMock = tt.args.mockTenantPatch t.Run(tt.name, func(t *testing.T) { - if err := addTenantZone(tt.args.ctx, tt.args.operatorClient, tt.args.params); (err != nil) != tt.wantErr { - t.Errorf("addTenantZone() error = %v, wantErr %v", err, tt.wantErr) + if err := addTenantPool(tt.args.ctx, tt.args.operatorClient, tt.args.params); (err != nil) != tt.wantErr { + t.Errorf("addTenantPool() error = %v, wantErr %v", err, tt.wantErr) } }) } diff --git a/restapi/embedded_spec.go b/restapi/embedded_spec.go index 95c3c2d1f..618684ef5 100644 --- a/restapi/embedded_spec.go +++ b/restapi/embedded_spec.go @@ -1933,6 +1933,91 @@ func init() { } } }, + "/namespaces/{namespace}/tenants/{tenant}/pools": { + "put": { + "tags": [ + "AdminAPI" + ], + "summary": "Tenant Update Pools", + "operationId": "TenantUpdatePools", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/poolUpdateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/tenant" + } + }, + "default": { + "description": "Generic error response.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + }, + "post": { + "tags": [ + "AdminAPI" + ], + "summary": "Tenant Add Pool", + "operationId": "TenantAddPool", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/pool" + } + } + ], + "responses": { + "201": { + "description": "A successful response." 
+ }, + "default": { + "description": "Generic error response.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/namespaces/{namespace}/tenants/{tenant}/usage": { "get": { "tags": [ @@ -1970,91 +2055,6 @@ func init() { } } }, - "/namespaces/{namespace}/tenants/{tenant}/zones": { - "put": { - "tags": [ - "AdminAPI" - ], - "summary": "Tenant Update Zones", - "operationId": "TenantUpdateZones", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "tenant", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/zoneUpdateRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/tenant" - } - }, - "default": { - "description": "Generic error response.", - "schema": { - "$ref": "#/definitions/error" - } - } - } - }, - "post": { - "tags": [ - "AdminAPI" - ], - "summary": "Tenant Add Zone", - "operationId": "TenantAddZone", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "tenant", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/zone" - } - } - ], - "responses": { - "201": { - "description": "A successful response." - }, - "default": { - "description": "Generic error response.", - "schema": { - "$ref": "#/definitions/error" - } - } - } - } - }, "/operator/{namespace}/{tenant}/buckets": { "get": { "tags": [ @@ -3346,7 +3346,7 @@ func init() { "required": [ "name", "namespace", - "zones" + "pools" ], "properties": { "access_key": { @@ -3413,18 +3413,18 @@ func init() { "namespace": { "type": "string" }, + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + }, "secret_key": { "type": "string" }, "tls": { "type": "object", "$ref": "#/definitions/tlsConfiguration" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } } } }, @@ -4225,6 +4225,268 @@ func init() { "group" ] }, + "pool": { + "type": "object", + "required": [ + "servers", + "volumes_per_server", + "volume_configuration" + ], + "properties": { + "affinity": { + "$ref": "#/definitions/poolAffinity" + }, + "name": { + "type": "string" + }, + "node_selector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. 
More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "resources": { + "$ref": "#/definitions/poolResources" + }, + "servers": { + "type": "integer" + }, + "tolerations": { + "$ref": "#/definitions/poolTolerations" + }, + "volume_configuration": { + "type": "object", + "required": [ + "size" + ], + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "size": { + "type": "integer" + }, + "storage_class_name": { + "type": "string" + } + } + }, + "volumes_per_server": { + "type": "integer", + "format": "int32" + } + } + }, + "poolAffinity": { + "description": "If specified, affinity will define the pod's scheduling constraints", + "type": "object", + "properties": { + "nodeAffinity": { + "description": "Describes node affinity scheduling rules for the pod.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "type": "object", + "required": [ + "preference", + "weight" + ], + "properties": { + "preference": { + "description": "A node selector term, associated with the corresponding weight.", + "type": "object", + "$ref": "#/definitions/nodeSelectorTerm" + }, + "weight": { + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "type": "object", + "required": [ + "nodeSelectorTerms" + ], + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "type": "array", + "items": { + "$ref": "#/definitions/nodeSelectorTerm" + } + } + } + } + } + }, + "podAffinity": { + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. 
The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "type": "object", + "required": [ + "podAffinityTerm", + "weight" + ], + "properties": { + "podAffinityTerm": { + "$ref": "#/definitions/podAffinityTerm" + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + }, + "podAntiAffinity": { + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "type": "object", + "required": [ + "podAffinityTerm", + "weight" + ], + "properties": { + "podAffinityTerm": { + "$ref": "#/definitions/podAffinityTerm" + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. 
When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + } + } + }, + "poolResources": { + "description": "If provided, use these requests and limit for cpu/memory resource allocation", + "type": "object", + "properties": { + "limits": { + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int64" + } + }, + "requests": { + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int64" + } + } + } + }, + "poolTolerationSeconds": { + "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). Zero and negative values will be treated as 0 (evict immediately) by the system.", + "type": "object", + "required": [ + "seconds" + ], + "properties": { + "seconds": { + "type": "integer", + "format": "int64" + } + } + }, + "poolTolerations": { + "description": "Tolerations allows users to set entries like effect, key, operator, value.", + "type": "array", + "items": { + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "type": "object", + "properties": { + "effect": { + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "type": "string" + }, + "operator": { + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" + }, + "tolerationSeconds": { + "$ref": "#/definitions/poolTolerationSeconds" + }, + "value": { + "description": "Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string.", + "type": "string" + } + } + } + }, + "poolUpdateRequest": { + "type": "object", + "required": [ + "pools" + ], + "properties": { + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + } + } + }, "principal": { "type": "object", "properties": { @@ -4587,15 +4849,15 @@ func init() { "namespace": { "type": "string" }, + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + }, "total_size": { "type": "integer", "format": "int64" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } } } }, @@ -4620,14 +4882,14 @@ func init() { "namespace": { "type": "string" }, + "pool_count": { + "type": "integer" + }, "total_size": { "type": "integer" }, "volume_count": { "type": "integer" - }, - "zone_count": { - "type": "integer" } } }, @@ -4816,268 +5078,6 @@ func init() { } } } - }, - "zone": { - "type": "object", - "required": [ - "servers", - "volumes_per_server", - "volume_configuration" - ], - "properties": { - "affinity": { - "$ref": "#/definitions/zoneAffinity" - }, - "name": { - "type": "string" - }, - "node_selector": { - "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "resources": { - "$ref": "#/definitions/zoneResources" - }, - "servers": { - "type": "integer" - }, - "tolerations": { - "$ref": "#/definitions/zoneTolerations" - }, - "volume_configuration": { - "type": "object", - "required": [ - "size" - ], - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "size": { - "type": "integer" - }, - "storage_class_name": { - "type": "string" - } - } - }, - "volumes_per_server": { - "type": "integer", - "format": "int32" - } - } - }, - "zoneAffinity": { - "description": "If specified, affinity will define the pod's scheduling constraints", - "type": "object", - "properties": { - "nodeAffinity": { - "description": "Describes node affinity scheduling rules for the pod.", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).", - "type": "object", - "required": [ - "preference", - "weight" - ], - "properties": { - "preference": { - "description": "A node selector term, associated with the corresponding weight.", - "type": "object", - "$ref": "#/definitions/nodeSelectorTerm" - }, - "weight": { - "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - "type": "object", - "required": [ - "nodeSelectorTerms" - ], - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "type": "array", - "items": { - "$ref": "#/definitions/nodeSelectorTerm" - } - } - } - } - } - }, - "podAffinity": { - "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "podAffinityTerm", - "weight" - ], - "properties": { - "podAffinityTerm": { - "$ref": "#/definitions/podAffinityTerm" - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - }, - "podAntiAffinity": { - "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. 
as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "podAffinityTerm", - "weight" - ], - "properties": { - "podAffinityTerm": { - "$ref": "#/definitions/podAffinityTerm" - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - } - } - }, - "zoneResources": { - "description": "If provided, use these requests and limit for cpu/memory resource allocation", - "type": "object", - "properties": { - "limits": { - "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "type": "integer", - "format": "int64" - } - }, - "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "type": "integer", - "format": "int64" - } - } - } - }, - "zoneTolerationSeconds": { - "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). 
Zero and negative values will be treated as 0 (evict immediately) by the system.", - "type": "object", - "required": [ - "seconds" - ], - "properties": { - "seconds": { - "type": "integer", - "format": "int64" - } - } - }, - "zoneTolerations": { - "description": "Tolerations allows users to set entries like effect, key, operator, value.", - "type": "array", - "items": { - "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", - "type": "object", - "properties": { - "effect": { - "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", - "type": "string" - }, - "key": { - "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", - "type": "string" - }, - "operator": { - "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - "type": "string" - }, - "tolerationSeconds": { - "$ref": "#/definitions/zoneTolerationSeconds" - }, - "value": { - "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", - "type": "string" - } - } - } - }, - "zoneUpdateRequest": { - "type": "object", - "required": [ - "zones" - ], - "properties": { - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } - } - } } }, "securityDefinitions": { @@ -6993,6 +6993,91 @@ func init() { } } }, + "/namespaces/{namespace}/tenants/{tenant}/pools": { + "put": { + "tags": [ + "AdminAPI" + ], + "summary": "Tenant Update Pools", + "operationId": "TenantUpdatePools", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/poolUpdateRequest" + } + } + ], + "responses": { + "200": { + "description": "A successful response.", + "schema": { + "$ref": "#/definitions/tenant" + } + }, + "default": { + "description": "Generic error response.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + }, + "post": { + "tags": [ + "AdminAPI" + ], + "summary": "Tenant Add Pool", + "operationId": "TenantAddPool", + "parameters": [ + { + "type": "string", + "name": "namespace", + "in": "path", + "required": true + }, + { + "type": "string", + "name": "tenant", + "in": "path", + "required": true + }, + { + "name": "body", + "in": "body", + "required": true, + "schema": { + "$ref": "#/definitions/pool" + } + } + ], + "responses": { + "201": { + "description": "A successful response." 
+ }, + "default": { + "description": "Generic error response.", + "schema": { + "$ref": "#/definitions/error" + } + } + } + } + }, "/namespaces/{namespace}/tenants/{tenant}/usage": { "get": { "tags": [ @@ -7030,91 +7115,6 @@ func init() { } } }, - "/namespaces/{namespace}/tenants/{tenant}/zones": { - "put": { - "tags": [ - "AdminAPI" - ], - "summary": "Tenant Update Zones", - "operationId": "TenantUpdateZones", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "tenant", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/zoneUpdateRequest" - } - } - ], - "responses": { - "200": { - "description": "A successful response.", - "schema": { - "$ref": "#/definitions/tenant" - } - }, - "default": { - "description": "Generic error response.", - "schema": { - "$ref": "#/definitions/error" - } - } - } - }, - "post": { - "tags": [ - "AdminAPI" - ], - "summary": "Tenant Add Zone", - "operationId": "TenantAddZone", - "parameters": [ - { - "type": "string", - "name": "namespace", - "in": "path", - "required": true - }, - { - "type": "string", - "name": "tenant", - "in": "path", - "required": true - }, - { - "name": "body", - "in": "body", - "required": true, - "schema": { - "$ref": "#/definitions/zone" - } - } - ], - "responses": { - "201": { - "description": "A successful response." - }, - "default": { - "description": "Generic error response.", - "schema": { - "$ref": "#/definitions/error" - } - } - } - } - }, "/operator/{namespace}/{tenant}/buckets": { "get": { "tags": [ @@ -8255,6 +8255,198 @@ func init() { } } }, + "PoolAffinityNodeAffinity": { + "description": "Describes node affinity scheduling rules for the pod.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "type": "object", + "required": [ + "nodeSelectorTerms" + ], + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. 
The terms are ORed.", + "type": "array", + "items": { + "$ref": "#/definitions/nodeSelectorTerm" + } + } + } + } + } + }, + "PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { + "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. is also a no-op).", + "type": "object", + "required": [ + "preference", + "weight" + ], + "properties": { + "preference": { + "description": "A node selector term, associated with the corresponding weight.", + "type": "object", + "$ref": "#/definitions/nodeSelectorTerm" + }, + "weight": { + "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + }, + "PoolAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "type": "object", + "required": [ + "nodeSelectorTerms" + ], + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "type": "array", + "items": { + "$ref": "#/definitions/nodeSelectorTerm" + } + } + } + }, + "PoolAffinityPodAffinity": { + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. 
all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + }, + "PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "type": "object", + "required": [ + "podAffinityTerm", + "weight" + ], + "properties": { + "podAffinityTerm": { + "$ref": "#/definitions/podAffinityTerm" + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + }, + "PoolAffinityPodAntiAffinity": { + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + }, + "PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { + "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", + "type": "object", + "required": [ + "podAffinityTerm", + "weight" + ], + "properties": { + "podAffinityTerm": { + "$ref": "#/definitions/podAffinityTerm" + }, + "weight": { + "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", + "type": "integer", + "format": "int32" + } + } + }, + "PoolTolerationsItems0": { + "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", + "type": "object", + "properties": { + "effect": { + "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", + "type": "string" + }, + "key": { + "description": "Key is the taint key that the toleration applies to. 
Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", + "type": "string" + }, + "operator": { + "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", + "type": "string" + }, + "tolerationSeconds": { + "$ref": "#/definitions/poolTolerationSeconds" + }, + "value": { + "description": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.", + "type": "string" + } + } + }, + "PoolVolumeConfiguration": { + "type": "object", + "required": [ + "size" + ], + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "size": { + "type": "integer" + }, + "storage_class_name": { + "type": "string" + } + } + }, "VaultConfigurationApprole": { "type": "object", "required": [ @@ -8300,198 +8492,6 @@ func init() { } } }, - "ZoneAffinityNodeAffinity": { - "description": "Describes node affinity scheduling rules for the pod.", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - "type": "object", - "required": [ - "nodeSelectorTerms" - ], - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "type": "array", - "items": { - "$ref": "#/definitions/nodeSelectorTerm" - } - } - } - } - } - }, - "ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { - "description": "An empty preferred scheduling term matches all objects with implicit weight 0 (i.e. it's a no-op). A null preferred scheduling term matches no objects (i.e. 
is also a no-op).", - "type": "object", - "required": [ - "preference", - "weight" - ], - "properties": { - "preference": { - "description": "A node selector term, associated with the corresponding weight.", - "type": "object", - "$ref": "#/definitions/nodeSelectorTerm" - }, - "weight": { - "description": "Weight associated with matching the corresponding nodeSelectorTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - }, - "ZoneAffinityNodeAffinityRequiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - "type": "object", - "required": [ - "nodeSelectorTerms" - ], - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "type": "array", - "items": { - "$ref": "#/definitions/nodeSelectorTerm" - } - } - } - }, - "ZoneAffinityPodAffinity": { - "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - }, - "ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "podAffinityTerm", - "weight" - ], - "properties": { - "podAffinityTerm": { - "$ref": "#/definitions/podAffinityTerm" - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - }, - "ZoneAffinityPodAntiAffinity": { - "description": "Describes pod anti-affinity scheduling rules (e.g. 
avoid putting this pod in the same node, zone, etc. as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - }, - "ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0": { - "description": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)", - "type": "object", - "required": [ - "podAffinityTerm", - "weight" - ], - "properties": { - "podAffinityTerm": { - "$ref": "#/definitions/podAffinityTerm" - }, - "weight": { - "description": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.", - "type": "integer", - "format": "int32" - } - } - }, - "ZoneTolerationsItems0": { - "description": "The pod this Toleration is attached to tolerates any taint that matches the triple \u003ckey,value,effect\u003e using the matching operator \u003coperator\u003e.", - "type": "object", - "properties": { - "effect": { - "description": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule, PreferNoSchedule and NoExecute.", - "type": "string" - }, - "key": { - "description": "Key is the taint key that the toleration applies to. Empty means match all taint keys. If the key is empty, operator must be Exists; this combination means to match all values and all keys.", - "type": "string" - }, - "operator": { - "description": "Operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.", - "type": "string" - }, - "tolerationSeconds": { - "$ref": "#/definitions/zoneTolerationSeconds" - }, - "value": { - "description": "Value is the taint value the toleration matches to. 
If the operator is Exists, the value should be empty, otherwise just a regular string.", - "type": "string" - } - } - }, - "ZoneVolumeConfiguration": { - "type": "object", - "required": [ - "size" - ], - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "size": { - "type": "integer" - }, - "storage_class_name": { - "type": "string" - } - } - }, "addBucketReplication": { "type": "object", "properties": { @@ -8929,7 +8929,7 @@ func init() { "required": [ "name", "namespace", - "zones" + "pools" ], "properties": { "access_key": { @@ -8996,18 +8996,18 @@ func init() { "namespace": { "type": "string" }, + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + }, "secret_key": { "type": "string" }, "tls": { "type": "object", "$ref": "#/definitions/tlsConfiguration" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } } } }, @@ -9742,6 +9742,199 @@ func init() { "group" ] }, + "pool": { + "type": "object", + "required": [ + "servers", + "volumes_per_server", + "volume_configuration" + ], + "properties": { + "affinity": { + "$ref": "#/definitions/poolAffinity" + }, + "name": { + "type": "string" + }, + "node_selector": { + "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "resources": { + "$ref": "#/definitions/poolResources" + }, + "servers": { + "type": "integer" + }, + "tolerations": { + "$ref": "#/definitions/poolTolerations" + }, + "volume_configuration": { + "type": "object", + "required": [ + "size" + ], + "properties": { + "annotations": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "labels": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "size": { + "type": "integer" + }, + "storage_class_name": { + "type": "string" + } + } + }, + "volumes_per_server": { + "type": "integer", + "format": "int32" + } + } + }, + "poolAffinity": { + "description": "If specified, affinity will define the pod's scheduling constraints", + "type": "object", + "properties": { + "nodeAffinity": { + "description": "Describes node affinity scheduling rules for the pod.", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", + "type": "object", + "required": [ + "nodeSelectorTerms" + ], + "properties": { + "nodeSelectorTerms": { + "description": "Required. A list of node selector terms. The terms are ORed.", + "type": "array", + "items": { + "$ref": "#/definitions/nodeSelectorTerm" + } + } + } + } + } + }, + "podAffinity": { + "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + }, + "podAntiAffinity": { + "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, pool, etc. as some other pod(s)).", + "type": "object", + "properties": { + "preferredDuringSchedulingIgnoredDuringExecution": { + "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" + } + }, + "requiredDuringSchedulingIgnoredDuringExecution": { + "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", + "type": "array", + "items": { + "$ref": "#/definitions/podAffinityTerm" + } + } + } + } + } + }, + "poolResources": { + "description": "If provided, use these requests and limit for cpu/memory resource allocation", + "type": "object", + "properties": { + "limits": { + "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int64" + } + }, + "requests": { + "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int64" + } + } + } + }, + "poolTolerationSeconds": { + "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). 
Zero and negative values will be treated as 0 (evict immediately) by the system.", + "type": "object", + "required": [ + "seconds" + ], + "properties": { + "seconds": { + "type": "integer", + "format": "int64" + } + } + }, + "poolTolerations": { + "description": "Tolerations allows users to set entries like effect, key, operator, value.", + "type": "array", + "items": { + "$ref": "#/definitions/PoolTolerationsItems0" + } + }, + "poolUpdateRequest": { + "type": "object", + "required": [ + "pools" + ], + "properties": { + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + } + } + }, "principal": { "type": "object", "properties": { @@ -10104,15 +10297,15 @@ func init() { "namespace": { "type": "string" }, + "pools": { + "type": "array", + "items": { + "$ref": "#/definitions/pool" + } + }, "total_size": { "type": "integer", "format": "int64" - }, - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } } } }, @@ -10137,14 +10330,14 @@ func init() { "namespace": { "type": "string" }, + "pool_count": { + "type": "integer" + }, "total_size": { "type": "integer" }, "volume_count": { "type": "integer" - }, - "zone_count": { - "type": "integer" } } }, @@ -10333,199 +10526,6 @@ func init() { } } } - }, - "zone": { - "type": "object", - "required": [ - "servers", - "volumes_per_server", - "volume_configuration" - ], - "properties": { - "affinity": { - "$ref": "#/definitions/zoneAffinity" - }, - "name": { - "type": "string" - }, - "node_selector": { - "description": "NodeSelector is a selector which must be true for the pod to fit on a node. Selector which must match a node's labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/", - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "resources": { - "$ref": "#/definitions/zoneResources" - }, - "servers": { - "type": "integer" - }, - "tolerations": { - "$ref": "#/definitions/zoneTolerations" - }, - "volume_configuration": { - "type": "object", - "required": [ - "size" - ], - "properties": { - "annotations": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "labels": { - "type": "object", - "additionalProperties": { - "type": "string" - } - }, - "size": { - "type": "integer" - }, - "storage_class_name": { - "type": "string" - } - } - }, - "volumes_per_server": { - "type": "integer", - "format": "int32" - } - } - }, - "zoneAffinity": { - "description": "If specified, affinity will define the pod's scheduling constraints", - "type": "object", - "properties": { - "nodeAffinity": { - "description": "Describes node affinity scheduling rules for the pod.", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node matches the corresponding matchExpressions; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityNodeAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to an update), the system may or may not try to eventually evict the pod from its node.", - "type": "object", - "required": [ - "nodeSelectorTerms" - ], - "properties": { - "nodeSelectorTerms": { - "description": "Required. A list of node selector terms. The terms are ORed.", - "type": "array", - "items": { - "$ref": "#/definitions/nodeSelectorTerm" - } - } - } - } - } - }, - "podAffinity": { - "description": "Describes pod affinity scheduling rules (e.g. co-locate this pod in the same node, zone, etc. as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityPodAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - }, - "podAntiAffinity": { - "description": "Describes pod anti-affinity scheduling rules (e.g. avoid putting this pod in the same node, zone, etc. as some other pod(s)).", - "type": "object", - "properties": { - "preferredDuringSchedulingIgnoredDuringExecution": { - "description": "The scheduler will prefer to schedule pods to nodes that satisfy the anti-affinity expressions specified by this field, but it may choose a node that violates one or more of the expressions. The node that is most preferred is the one with the greatest sum of weights, i.e. 
for each node that meets all of the scheduling requirements (resource request, requiredDuringScheduling anti-affinity expressions, etc.), compute a sum by iterating through the elements of this field and adding \"weight\" to the sum if the node has pods which matches the corresponding podAffinityTerm; the node(s) with the highest sum are the most preferred.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneAffinityPodAntiAffinityPreferredDuringSchedulingIgnoredDuringExecutionItems0" - } - }, - "requiredDuringSchedulingIgnoredDuringExecution": { - "description": "If the anti-affinity requirements specified by this field are not met at scheduling time, the pod will not be scheduled onto the node. If the anti-affinity requirements specified by this field cease to be met at some point during pod execution (e.g. due to a pod label update), the system may or may not try to eventually evict the pod from its node. When there are multiple elements, the lists of nodes corresponding to each podAffinityTerm are intersected, i.e. all terms must be satisfied.", - "type": "array", - "items": { - "$ref": "#/definitions/podAffinityTerm" - } - } - } - } - } - }, - "zoneResources": { - "description": "If provided, use these requests and limit for cpu/memory resource allocation", - "type": "object", - "properties": { - "limits": { - "description": "Limits describes the maximum amount of compute resources allowed. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "type": "integer", - "format": "int64" - } - }, - "requests": { - "description": "Requests describes the minimum amount of compute resources required. If Requests is omitted for a container, it defaults to Limits if that is explicitly specified, otherwise to an implementation-defined value. More info: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/", - "type": "object", - "additionalProperties": { - "type": "integer", - "format": "int64" - } - } - } - }, - "zoneTolerationSeconds": { - "description": "TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. By default, it is not set, which means tolerate the taint forever (do not evict). 
Zero and negative values will be treated as 0 (evict immediately) by the system.", - "type": "object", - "required": [ - "seconds" - ], - "properties": { - "seconds": { - "type": "integer", - "format": "int64" - } - } - }, - "zoneTolerations": { - "description": "Tolerations allows users to set entries like effect, key, operator, value.", - "type": "array", - "items": { - "$ref": "#/definitions/ZoneTolerationsItems0" - } - }, - "zoneUpdateRequest": { - "type": "object", - "required": [ - "zones" - ], - "properties": { - "zones": { - "type": "array", - "items": { - "$ref": "#/definitions/zone" - } - } - } } }, "securityDefinitions": { diff --git a/restapi/operations/admin_api/tenant_add_zone.go b/restapi/operations/admin_api/tenant_add_pool.go similarity index 68% rename from restapi/operations/admin_api/tenant_add_zone.go rename to restapi/operations/admin_api/tenant_add_pool.go index 88d714f7c..8eea4db59 100644 --- a/restapi/operations/admin_api/tenant_add_zone.go +++ b/restapi/operations/admin_api/tenant_add_pool.go @@ -30,40 +30,40 @@ import ( "github.com/minio/console/models" ) -// TenantAddZoneHandlerFunc turns a function with the right signature into a tenant add zone handler -type TenantAddZoneHandlerFunc func(TenantAddZoneParams, *models.Principal) middleware.Responder +// TenantAddPoolHandlerFunc turns a function with the right signature into a tenant add pool handler +type TenantAddPoolHandlerFunc func(TenantAddPoolParams, *models.Principal) middleware.Responder // Handle executing the request and returning a response -func (fn TenantAddZoneHandlerFunc) Handle(params TenantAddZoneParams, principal *models.Principal) middleware.Responder { +func (fn TenantAddPoolHandlerFunc) Handle(params TenantAddPoolParams, principal *models.Principal) middleware.Responder { return fn(params, principal) } -// TenantAddZoneHandler interface for that can handle valid tenant add zone params -type TenantAddZoneHandler interface { - Handle(TenantAddZoneParams, *models.Principal) middleware.Responder +// TenantAddPoolHandler interface for that can handle valid tenant add pool params +type TenantAddPoolHandler interface { + Handle(TenantAddPoolParams, *models.Principal) middleware.Responder } -// NewTenantAddZone creates a new http.Handler for the tenant add zone operation -func NewTenantAddZone(ctx *middleware.Context, handler TenantAddZoneHandler) *TenantAddZone { - return &TenantAddZone{Context: ctx, Handler: handler} +// NewTenantAddPool creates a new http.Handler for the tenant add pool operation +func NewTenantAddPool(ctx *middleware.Context, handler TenantAddPoolHandler) *TenantAddPool { + return &TenantAddPool{Context: ctx, Handler: handler} } -/*TenantAddZone swagger:route POST /namespaces/{namespace}/tenants/{tenant}/zones AdminAPI tenantAddZone +/*TenantAddPool swagger:route POST /namespaces/{namespace}/tenants/{tenant}/pools AdminAPI tenantAddPool -Tenant Add Zone +Tenant Add Pool */ -type TenantAddZone struct { +type TenantAddPool struct { Context *middleware.Context - Handler TenantAddZoneHandler + Handler TenantAddPoolHandler } -func (o *TenantAddZone) ServeHTTP(rw http.ResponseWriter, r *http.Request) { +func (o *TenantAddPool) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { r = rCtx } - var Params = NewTenantAddZoneParams() + var Params = NewTenantAddPoolParams() uprinc, aCtx, err := o.Context.Authorize(r, route) if err != nil { diff --git a/restapi/operations/admin_api/tenant_add_zone_parameters.go 
b/restapi/operations/admin_api/tenant_add_pool_parameters.go similarity index 85% rename from restapi/operations/admin_api/tenant_add_zone_parameters.go rename to restapi/operations/admin_api/tenant_add_pool_parameters.go index f52f06bc3..0d620e99f 100644 --- a/restapi/operations/admin_api/tenant_add_zone_parameters.go +++ b/restapi/operations/admin_api/tenant_add_pool_parameters.go @@ -34,18 +34,18 @@ import ( "github.com/minio/console/models" ) -// NewTenantAddZoneParams creates a new TenantAddZoneParams object +// NewTenantAddPoolParams creates a new TenantAddPoolParams object // no default values defined in spec. -func NewTenantAddZoneParams() TenantAddZoneParams { +func NewTenantAddPoolParams() TenantAddPoolParams { - return TenantAddZoneParams{} + return TenantAddPoolParams{} } -// TenantAddZoneParams contains all the bound params for the tenant add zone operation +// TenantAddPoolParams contains all the bound params for the tenant add pool operation // typically these are obtained from a http.Request // -// swagger:parameters TenantAddZone -type TenantAddZoneParams struct { +// swagger:parameters TenantAddPool +type TenantAddPoolParams struct { // HTTP Request Object HTTPRequest *http.Request `json:"-"` @@ -54,7 +54,7 @@ type TenantAddZoneParams struct { Required: true In: body */ - Body *models.Zone + Body *models.Pool /* Required: true In: path @@ -70,15 +70,15 @@ type TenantAddZoneParams struct { // BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface // for simple values it will use straight method calls. // -// To ensure default values, the struct must have been initialized with NewTenantAddZoneParams() beforehand. -func (o *TenantAddZoneParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { +// To ensure default values, the struct must have been initialized with NewTenantAddPoolParams() beforehand. +func (o *TenantAddPoolParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { var res []error o.HTTPRequest = r if runtime.HasBody(r) { defer r.Body.Close() - var body models.Zone + var body models.Pool if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { res = append(res, errors.Required("body", "body", "")) @@ -115,7 +115,7 @@ func (o *TenantAddZoneParams) BindRequest(r *http.Request, route *middleware.Mat } // bindNamespace binds and validates parameter Namespace from path. -func (o *TenantAddZoneParams) bindNamespace(rawData []string, hasKey bool, formats strfmt.Registry) error { +func (o *TenantAddPoolParams) bindNamespace(rawData []string, hasKey bool, formats strfmt.Registry) error { var raw string if len(rawData) > 0 { raw = rawData[len(rawData)-1] @@ -130,7 +130,7 @@ func (o *TenantAddZoneParams) bindNamespace(rawData []string, hasKey bool, forma } // bindTenant binds and validates parameter Tenant from path. 
-func (o *TenantAddZoneParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { +func (o *TenantAddPoolParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { var raw string if len(rawData) > 0 { raw = rawData[len(rawData)-1] diff --git a/restapi/operations/admin_api/tenant_add_zone_responses.go b/restapi/operations/admin_api/tenant_add_pool_responses.go similarity index 57% rename from restapi/operations/admin_api/tenant_add_zone_responses.go rename to restapi/operations/admin_api/tenant_add_pool_responses.go index 32567009e..70762658b 100644 --- a/restapi/operations/admin_api/tenant_add_zone_responses.go +++ b/restapi/operations/admin_api/tenant_add_pool_responses.go @@ -30,35 +30,35 @@ import ( "github.com/minio/console/models" ) -// TenantAddZoneCreatedCode is the HTTP code returned for type TenantAddZoneCreated -const TenantAddZoneCreatedCode int = 201 +// TenantAddPoolCreatedCode is the HTTP code returned for type TenantAddPoolCreated +const TenantAddPoolCreatedCode int = 201 -/*TenantAddZoneCreated A successful response. +/*TenantAddPoolCreated A successful response. -swagger:response tenantAddZoneCreated +swagger:response tenantAddPoolCreated */ -type TenantAddZoneCreated struct { +type TenantAddPoolCreated struct { } -// NewTenantAddZoneCreated creates TenantAddZoneCreated with default headers values -func NewTenantAddZoneCreated() *TenantAddZoneCreated { +// NewTenantAddPoolCreated creates TenantAddPoolCreated with default headers values +func NewTenantAddPoolCreated() *TenantAddPoolCreated { - return &TenantAddZoneCreated{} + return &TenantAddPoolCreated{} } // WriteResponse to the client -func (o *TenantAddZoneCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +func (o *TenantAddPoolCreated) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses rw.WriteHeader(201) } -/*TenantAddZoneDefault Generic error response. +/*TenantAddPoolDefault Generic error response. 
-swagger:response tenantAddZoneDefault +swagger:response tenantAddPoolDefault */ -type TenantAddZoneDefault struct { +type TenantAddPoolDefault struct { _statusCode int /* @@ -67,41 +67,41 @@ type TenantAddZoneDefault struct { Payload *models.Error `json:"body,omitempty"` } -// NewTenantAddZoneDefault creates TenantAddZoneDefault with default headers values -func NewTenantAddZoneDefault(code int) *TenantAddZoneDefault { +// NewTenantAddPoolDefault creates TenantAddPoolDefault with default headers values +func NewTenantAddPoolDefault(code int) *TenantAddPoolDefault { if code <= 0 { code = 500 } - return &TenantAddZoneDefault{ + return &TenantAddPoolDefault{ _statusCode: code, } } -// WithStatusCode adds the status to the tenant add zone default response -func (o *TenantAddZoneDefault) WithStatusCode(code int) *TenantAddZoneDefault { +// WithStatusCode adds the status to the tenant add pool default response +func (o *TenantAddPoolDefault) WithStatusCode(code int) *TenantAddPoolDefault { o._statusCode = code return o } -// SetStatusCode sets the status to the tenant add zone default response -func (o *TenantAddZoneDefault) SetStatusCode(code int) { +// SetStatusCode sets the status to the tenant add pool default response +func (o *TenantAddPoolDefault) SetStatusCode(code int) { o._statusCode = code } -// WithPayload adds the payload to the tenant add zone default response -func (o *TenantAddZoneDefault) WithPayload(payload *models.Error) *TenantAddZoneDefault { +// WithPayload adds the payload to the tenant add pool default response +func (o *TenantAddPoolDefault) WithPayload(payload *models.Error) *TenantAddPoolDefault { o.Payload = payload return o } -// SetPayload sets the payload to the tenant add zone default response -func (o *TenantAddZoneDefault) SetPayload(payload *models.Error) { +// SetPayload sets the payload to the tenant add pool default response +func (o *TenantAddPoolDefault) SetPayload(payload *models.Error) { o.Payload = payload } // WriteResponse to the client -func (o *TenantAddZoneDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +func (o *TenantAddPoolDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(o._statusCode) if o.Payload != nil { diff --git a/restapi/operations/admin_api/tenant_add_zone_urlbuilder.go b/restapi/operations/admin_api/tenant_add_pool_urlbuilder.go similarity index 79% rename from restapi/operations/admin_api/tenant_add_zone_urlbuilder.go rename to restapi/operations/admin_api/tenant_add_pool_urlbuilder.go index 6ca042e22..926c08d42 100644 --- a/restapi/operations/admin_api/tenant_add_zone_urlbuilder.go +++ b/restapi/operations/admin_api/tenant_add_pool_urlbuilder.go @@ -29,8 +29,8 @@ import ( "strings" ) -// TenantAddZoneURL generates an URL for the tenant add zone operation -type TenantAddZoneURL struct { +// TenantAddPoolURL generates an URL for the tenant add pool operation +type TenantAddPoolURL struct { Namespace string Tenant string @@ -42,7 +42,7 @@ type TenantAddZoneURL struct { // WithBasePath sets the base path for this url builder, only required when it's different from the // base path specified in the swagger spec. 
// When the value of the base path is an empty string -func (o *TenantAddZoneURL) WithBasePath(bp string) *TenantAddZoneURL { +func (o *TenantAddPoolURL) WithBasePath(bp string) *TenantAddPoolURL { o.SetBasePath(bp) return o } @@ -50,28 +50,28 @@ func (o *TenantAddZoneURL) WithBasePath(bp string) *TenantAddZoneURL { // SetBasePath sets the base path for this url builder, only required when it's different from the // base path specified in the swagger spec. // When the value of the base path is an empty string -func (o *TenantAddZoneURL) SetBasePath(bp string) { +func (o *TenantAddPoolURL) SetBasePath(bp string) { o._basePath = bp } // Build a url path and query string -func (o *TenantAddZoneURL) Build() (*url.URL, error) { +func (o *TenantAddPoolURL) Build() (*url.URL, error) { var _result url.URL - var _path = "/namespaces/{namespace}/tenants/{tenant}/zones" + var _path = "/namespaces/{namespace}/tenants/{tenant}/pools" namespace := o.Namespace if namespace != "" { _path = strings.Replace(_path, "{namespace}", namespace, -1) } else { - return nil, errors.New("namespace is required on TenantAddZoneURL") + return nil, errors.New("namespace is required on TenantAddPoolURL") } tenant := o.Tenant if tenant != "" { _path = strings.Replace(_path, "{tenant}", tenant, -1) } else { - return nil, errors.New("tenant is required on TenantAddZoneURL") + return nil, errors.New("tenant is required on TenantAddPoolURL") } _basePath := o._basePath @@ -84,7 +84,7 @@ func (o *TenantAddZoneURL) Build() (*url.URL, error) { } // Must is a helper function to panic when the url builder returns an error -func (o *TenantAddZoneURL) Must(u *url.URL, err error) *url.URL { +func (o *TenantAddPoolURL) Must(u *url.URL, err error) *url.URL { if err != nil { panic(err) } @@ -95,17 +95,17 @@ func (o *TenantAddZoneURL) Must(u *url.URL, err error) *url.URL { } // String returns the string representation of the path with query string -func (o *TenantAddZoneURL) String() string { +func (o *TenantAddPoolURL) String() string { return o.Must(o.Build()).String() } // BuildFull builds a full url with scheme, host, path and query string -func (o *TenantAddZoneURL) BuildFull(scheme, host string) (*url.URL, error) { +func (o *TenantAddPoolURL) BuildFull(scheme, host string) (*url.URL, error) { if scheme == "" { - return nil, errors.New("scheme is required for a full url on TenantAddZoneURL") + return nil, errors.New("scheme is required for a full url on TenantAddPoolURL") } if host == "" { - return nil, errors.New("host is required for a full url on TenantAddZoneURL") + return nil, errors.New("host is required for a full url on TenantAddPoolURL") } base, err := o.Build() @@ -119,6 +119,6 @@ func (o *TenantAddZoneURL) BuildFull(scheme, host string) (*url.URL, error) { } // StringFull returns the string representation of a complete url -func (o *TenantAddZoneURL) StringFull(scheme, host string) string { +func (o *TenantAddPoolURL) StringFull(scheme, host string) string { return o.Must(o.BuildFull(scheme, host)).String() } diff --git a/restapi/operations/admin_api/tenant_update_zones.go b/restapi/operations/admin_api/tenant_update_pools.go similarity index 64% rename from restapi/operations/admin_api/tenant_update_zones.go rename to restapi/operations/admin_api/tenant_update_pools.go index b54d734a0..4661255cc 100644 --- a/restapi/operations/admin_api/tenant_update_zones.go +++ b/restapi/operations/admin_api/tenant_update_pools.go @@ -30,40 +30,40 @@ import ( "github.com/minio/console/models" ) -// TenantUpdateZonesHandlerFunc turns a 
function with the right signature into a tenant update zones handler -type TenantUpdateZonesHandlerFunc func(TenantUpdateZonesParams, *models.Principal) middleware.Responder +// TenantUpdatePoolsHandlerFunc turns a function with the right signature into a tenant update pools handler +type TenantUpdatePoolsHandlerFunc func(TenantUpdatePoolsParams, *models.Principal) middleware.Responder // Handle executing the request and returning a response -func (fn TenantUpdateZonesHandlerFunc) Handle(params TenantUpdateZonesParams, principal *models.Principal) middleware.Responder { +func (fn TenantUpdatePoolsHandlerFunc) Handle(params TenantUpdatePoolsParams, principal *models.Principal) middleware.Responder { return fn(params, principal) } -// TenantUpdateZonesHandler interface for that can handle valid tenant update zones params -type TenantUpdateZonesHandler interface { - Handle(TenantUpdateZonesParams, *models.Principal) middleware.Responder +// TenantUpdatePoolsHandler interface for that can handle valid tenant update pools params +type TenantUpdatePoolsHandler interface { + Handle(TenantUpdatePoolsParams, *models.Principal) middleware.Responder } -// NewTenantUpdateZones creates a new http.Handler for the tenant update zones operation -func NewTenantUpdateZones(ctx *middleware.Context, handler TenantUpdateZonesHandler) *TenantUpdateZones { - return &TenantUpdateZones{Context: ctx, Handler: handler} +// NewTenantUpdatePools creates a new http.Handler for the tenant update pools operation +func NewTenantUpdatePools(ctx *middleware.Context, handler TenantUpdatePoolsHandler) *TenantUpdatePools { + return &TenantUpdatePools{Context: ctx, Handler: handler} } -/*TenantUpdateZones swagger:route PUT /namespaces/{namespace}/tenants/{tenant}/zones AdminAPI tenantUpdateZones +/*TenantUpdatePools swagger:route PUT /namespaces/{namespace}/tenants/{tenant}/pools AdminAPI tenantUpdatePools -Tenant Update Zones +Tenant Update Pools */ -type TenantUpdateZones struct { +type TenantUpdatePools struct { Context *middleware.Context - Handler TenantUpdateZonesHandler + Handler TenantUpdatePoolsHandler } -func (o *TenantUpdateZones) ServeHTTP(rw http.ResponseWriter, r *http.Request) { +func (o *TenantUpdatePools) ServeHTTP(rw http.ResponseWriter, r *http.Request) { route, rCtx, _ := o.Context.RouteInfo(r) if rCtx != nil { r = rCtx } - var Params = NewTenantUpdateZonesParams() + var Params = NewTenantUpdatePoolsParams() uprinc, aCtx, err := o.Context.Authorize(r, route) if err != nil { diff --git a/restapi/operations/admin_api/tenant_update_zones_parameters.go b/restapi/operations/admin_api/tenant_update_pools_parameters.go similarity index 83% rename from restapi/operations/admin_api/tenant_update_zones_parameters.go rename to restapi/operations/admin_api/tenant_update_pools_parameters.go index 4cc1142fd..ca37894ea 100644 --- a/restapi/operations/admin_api/tenant_update_zones_parameters.go +++ b/restapi/operations/admin_api/tenant_update_pools_parameters.go @@ -34,18 +34,18 @@ import ( "github.com/minio/console/models" ) -// NewTenantUpdateZonesParams creates a new TenantUpdateZonesParams object +// NewTenantUpdatePoolsParams creates a new TenantUpdatePoolsParams object // no default values defined in spec. 
-func NewTenantUpdateZonesParams() TenantUpdateZonesParams { +func NewTenantUpdatePoolsParams() TenantUpdatePoolsParams { - return TenantUpdateZonesParams{} + return TenantUpdatePoolsParams{} } -// TenantUpdateZonesParams contains all the bound params for the tenant update zones operation +// TenantUpdatePoolsParams contains all the bound params for the tenant update pools operation // typically these are obtained from a http.Request // -// swagger:parameters TenantUpdateZones -type TenantUpdateZonesParams struct { +// swagger:parameters TenantUpdatePools +type TenantUpdatePoolsParams struct { // HTTP Request Object HTTPRequest *http.Request `json:"-"` @@ -54,7 +54,7 @@ type TenantUpdateZonesParams struct { Required: true In: body */ - Body *models.ZoneUpdateRequest + Body *models.PoolUpdateRequest /* Required: true In: path @@ -70,15 +70,15 @@ type TenantUpdateZonesParams struct { // BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface // for simple values it will use straight method calls. // -// To ensure default values, the struct must have been initialized with NewTenantUpdateZonesParams() beforehand. -func (o *TenantUpdateZonesParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { +// To ensure default values, the struct must have been initialized with NewTenantUpdatePoolsParams() beforehand. +func (o *TenantUpdatePoolsParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { var res []error o.HTTPRequest = r if runtime.HasBody(r) { defer r.Body.Close() - var body models.ZoneUpdateRequest + var body models.PoolUpdateRequest if err := route.Consumer.Consume(r.Body, &body); err != nil { if err == io.EOF { res = append(res, errors.Required("body", "body", "")) @@ -115,7 +115,7 @@ func (o *TenantUpdateZonesParams) BindRequest(r *http.Request, route *middleware } // bindNamespace binds and validates parameter Namespace from path. -func (o *TenantUpdateZonesParams) bindNamespace(rawData []string, hasKey bool, formats strfmt.Registry) error { +func (o *TenantUpdatePoolsParams) bindNamespace(rawData []string, hasKey bool, formats strfmt.Registry) error { var raw string if len(rawData) > 0 { raw = rawData[len(rawData)-1] @@ -130,7 +130,7 @@ func (o *TenantUpdateZonesParams) bindNamespace(rawData []string, hasKey bool, f } // bindTenant binds and validates parameter Tenant from path. 
-func (o *TenantUpdateZonesParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { +func (o *TenantUpdatePoolsParams) bindTenant(rawData []string, hasKey bool, formats strfmt.Registry) error { var raw string if len(rawData) > 0 { raw = rawData[len(rawData)-1] diff --git a/restapi/operations/admin_api/tenant_update_zones_responses.go b/restapi/operations/admin_api/tenant_update_pools_responses.go similarity index 56% rename from restapi/operations/admin_api/tenant_update_zones_responses.go rename to restapi/operations/admin_api/tenant_update_pools_responses.go index 20f149adc..7dca20280 100644 --- a/restapi/operations/admin_api/tenant_update_zones_responses.go +++ b/restapi/operations/admin_api/tenant_update_pools_responses.go @@ -30,14 +30,14 @@ import ( "github.com/minio/console/models" ) -// TenantUpdateZonesOKCode is the HTTP code returned for type TenantUpdateZonesOK -const TenantUpdateZonesOKCode int = 200 +// TenantUpdatePoolsOKCode is the HTTP code returned for type TenantUpdatePoolsOK +const TenantUpdatePoolsOKCode int = 200 -/*TenantUpdateZonesOK A successful response. +/*TenantUpdatePoolsOK A successful response. -swagger:response tenantUpdateZonesOK +swagger:response tenantUpdatePoolsOK */ -type TenantUpdateZonesOK struct { +type TenantUpdatePoolsOK struct { /* In: Body @@ -45,25 +45,25 @@ type TenantUpdateZonesOK struct { Payload *models.Tenant `json:"body,omitempty"` } -// NewTenantUpdateZonesOK creates TenantUpdateZonesOK with default headers values -func NewTenantUpdateZonesOK() *TenantUpdateZonesOK { +// NewTenantUpdatePoolsOK creates TenantUpdatePoolsOK with default headers values +func NewTenantUpdatePoolsOK() *TenantUpdatePoolsOK { - return &TenantUpdateZonesOK{} + return &TenantUpdatePoolsOK{} } -// WithPayload adds the payload to the tenant update zones o k response -func (o *TenantUpdateZonesOK) WithPayload(payload *models.Tenant) *TenantUpdateZonesOK { +// WithPayload adds the payload to the tenant update pools o k response +func (o *TenantUpdatePoolsOK) WithPayload(payload *models.Tenant) *TenantUpdatePoolsOK { o.Payload = payload return o } -// SetPayload sets the payload to the tenant update zones o k response -func (o *TenantUpdateZonesOK) SetPayload(payload *models.Tenant) { +// SetPayload sets the payload to the tenant update pools o k response +func (o *TenantUpdatePoolsOK) SetPayload(payload *models.Tenant) { o.Payload = payload } // WriteResponse to the client -func (o *TenantUpdateZonesOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +func (o *TenantUpdatePoolsOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(200) if o.Payload != nil { @@ -74,11 +74,11 @@ func (o *TenantUpdateZonesOK) WriteResponse(rw http.ResponseWriter, producer run } } -/*TenantUpdateZonesDefault Generic error response. +/*TenantUpdatePoolsDefault Generic error response. 
-swagger:response tenantUpdateZonesDefault +swagger:response tenantUpdatePoolsDefault */ -type TenantUpdateZonesDefault struct { +type TenantUpdatePoolsDefault struct { _statusCode int /* @@ -87,41 +87,41 @@ type TenantUpdateZonesDefault struct { Payload *models.Error `json:"body,omitempty"` } -// NewTenantUpdateZonesDefault creates TenantUpdateZonesDefault with default headers values -func NewTenantUpdateZonesDefault(code int) *TenantUpdateZonesDefault { +// NewTenantUpdatePoolsDefault creates TenantUpdatePoolsDefault with default headers values +func NewTenantUpdatePoolsDefault(code int) *TenantUpdatePoolsDefault { if code <= 0 { code = 500 } - return &TenantUpdateZonesDefault{ + return &TenantUpdatePoolsDefault{ _statusCode: code, } } -// WithStatusCode adds the status to the tenant update zones default response -func (o *TenantUpdateZonesDefault) WithStatusCode(code int) *TenantUpdateZonesDefault { +// WithStatusCode adds the status to the tenant update pools default response +func (o *TenantUpdatePoolsDefault) WithStatusCode(code int) *TenantUpdatePoolsDefault { o._statusCode = code return o } -// SetStatusCode sets the status to the tenant update zones default response -func (o *TenantUpdateZonesDefault) SetStatusCode(code int) { +// SetStatusCode sets the status to the tenant update pools default response +func (o *TenantUpdatePoolsDefault) SetStatusCode(code int) { o._statusCode = code } -// WithPayload adds the payload to the tenant update zones default response -func (o *TenantUpdateZonesDefault) WithPayload(payload *models.Error) *TenantUpdateZonesDefault { +// WithPayload adds the payload to the tenant update pools default response +func (o *TenantUpdatePoolsDefault) WithPayload(payload *models.Error) *TenantUpdatePoolsDefault { o.Payload = payload return o } -// SetPayload sets the payload to the tenant update zones default response -func (o *TenantUpdateZonesDefault) SetPayload(payload *models.Error) { +// SetPayload sets the payload to the tenant update pools default response +func (o *TenantUpdatePoolsDefault) SetPayload(payload *models.Error) { o.Payload = payload } // WriteResponse to the client -func (o *TenantUpdateZonesDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { +func (o *TenantUpdatePoolsDefault) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { rw.WriteHeader(o._statusCode) if o.Payload != nil { diff --git a/restapi/operations/admin_api/tenant_update_zones_urlbuilder.go b/restapi/operations/admin_api/tenant_update_pools_urlbuilder.go similarity index 78% rename from restapi/operations/admin_api/tenant_update_zones_urlbuilder.go rename to restapi/operations/admin_api/tenant_update_pools_urlbuilder.go index 05fd1dde1..5de52318f 100644 --- a/restapi/operations/admin_api/tenant_update_zones_urlbuilder.go +++ b/restapi/operations/admin_api/tenant_update_pools_urlbuilder.go @@ -29,8 +29,8 @@ import ( "strings" ) -// TenantUpdateZonesURL generates an URL for the tenant update zones operation -type TenantUpdateZonesURL struct { +// TenantUpdatePoolsURL generates an URL for the tenant update pools operation +type TenantUpdatePoolsURL struct { Namespace string Tenant string @@ -42,7 +42,7 @@ type TenantUpdateZonesURL struct { // WithBasePath sets the base path for this url builder, only required when it's different from the // base path specified in the swagger spec. 
// When the value of the base path is an empty string -func (o *TenantUpdateZonesURL) WithBasePath(bp string) *TenantUpdateZonesURL { +func (o *TenantUpdatePoolsURL) WithBasePath(bp string) *TenantUpdatePoolsURL { o.SetBasePath(bp) return o } @@ -50,28 +50,28 @@ func (o *TenantUpdateZonesURL) WithBasePath(bp string) *TenantUpdateZonesURL { // SetBasePath sets the base path for this url builder, only required when it's different from the // base path specified in the swagger spec. // When the value of the base path is an empty string -func (o *TenantUpdateZonesURL) SetBasePath(bp string) { +func (o *TenantUpdatePoolsURL) SetBasePath(bp string) { o._basePath = bp } // Build a url path and query string -func (o *TenantUpdateZonesURL) Build() (*url.URL, error) { +func (o *TenantUpdatePoolsURL) Build() (*url.URL, error) { var _result url.URL - var _path = "/namespaces/{namespace}/tenants/{tenant}/zones" + var _path = "/namespaces/{namespace}/tenants/{tenant}/pools" namespace := o.Namespace if namespace != "" { _path = strings.Replace(_path, "{namespace}", namespace, -1) } else { - return nil, errors.New("namespace is required on TenantUpdateZonesURL") + return nil, errors.New("namespace is required on TenantUpdatePoolsURL") } tenant := o.Tenant if tenant != "" { _path = strings.Replace(_path, "{tenant}", tenant, -1) } else { - return nil, errors.New("tenant is required on TenantUpdateZonesURL") + return nil, errors.New("tenant is required on TenantUpdatePoolsURL") } _basePath := o._basePath @@ -84,7 +84,7 @@ func (o *TenantUpdateZonesURL) Build() (*url.URL, error) { } // Must is a helper function to panic when the url builder returns an error -func (o *TenantUpdateZonesURL) Must(u *url.URL, err error) *url.URL { +func (o *TenantUpdatePoolsURL) Must(u *url.URL, err error) *url.URL { if err != nil { panic(err) } @@ -95,17 +95,17 @@ func (o *TenantUpdateZonesURL) Must(u *url.URL, err error) *url.URL { } // String returns the string representation of the path with query string -func (o *TenantUpdateZonesURL) String() string { +func (o *TenantUpdatePoolsURL) String() string { return o.Must(o.Build()).String() } // BuildFull builds a full url with scheme, host, path and query string -func (o *TenantUpdateZonesURL) BuildFull(scheme, host string) (*url.URL, error) { +func (o *TenantUpdatePoolsURL) BuildFull(scheme, host string) (*url.URL, error) { if scheme == "" { - return nil, errors.New("scheme is required for a full url on TenantUpdateZonesURL") + return nil, errors.New("scheme is required for a full url on TenantUpdatePoolsURL") } if host == "" { - return nil, errors.New("host is required for a full url on TenantUpdateZonesURL") + return nil, errors.New("host is required for a full url on TenantUpdatePoolsURL") } base, err := o.Build() @@ -119,6 +119,6 @@ func (o *TenantUpdateZonesURL) BuildFull(scheme, host string) (*url.URL, error) } // StringFull returns the string representation of a complete url -func (o *TenantUpdateZonesURL) StringFull(scheme, host string) string { +func (o *TenantUpdatePoolsURL) StringFull(scheme, host string) string { return o.Must(o.BuildFull(scheme, host)).String() } diff --git a/restapi/operations/console_api.go b/restapi/operations/console_api.go index 31f23a23d..e3720a4e5 100644 --- a/restapi/operations/console_api.go +++ b/restapi/operations/console_api.go @@ -287,8 +287,8 @@ func NewConsoleAPI(spec *loads.Document) *ConsoleAPI { UserAPIShareObjectHandler: user_api.ShareObjectHandlerFunc(func(params user_api.ShareObjectParams, principal *models.Principal) 
middleware.Responder { return middleware.NotImplemented("operation user_api.ShareObject has not yet been implemented") }), - AdminAPITenantAddZoneHandler: admin_api.TenantAddZoneHandlerFunc(func(params admin_api.TenantAddZoneParams, principal *models.Principal) middleware.Responder { - return middleware.NotImplemented("operation admin_api.TenantAddZone has not yet been implemented") + AdminAPITenantAddPoolHandler: admin_api.TenantAddPoolHandlerFunc(func(params admin_api.TenantAddPoolParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation admin_api.TenantAddPool has not yet been implemented") }), AdminAPITenantInfoHandler: admin_api.TenantInfoHandlerFunc(func(params admin_api.TenantInfoParams, principal *models.Principal) middleware.Responder { return middleware.NotImplemented("operation admin_api.TenantInfo has not yet been implemented") @@ -299,8 +299,8 @@ func NewConsoleAPI(spec *loads.Document) *ConsoleAPI { AdminAPITenantUpdateEncryptionHandler: admin_api.TenantUpdateEncryptionHandlerFunc(func(params admin_api.TenantUpdateEncryptionParams, principal *models.Principal) middleware.Responder { return middleware.NotImplemented("operation admin_api.TenantUpdateEncryption has not yet been implemented") }), - AdminAPITenantUpdateZonesHandler: admin_api.TenantUpdateZonesHandlerFunc(func(params admin_api.TenantUpdateZonesParams, principal *models.Principal) middleware.Responder { - return middleware.NotImplemented("operation admin_api.TenantUpdateZones has not yet been implemented") + AdminAPITenantUpdatePoolsHandler: admin_api.TenantUpdatePoolsHandlerFunc(func(params admin_api.TenantUpdatePoolsParams, principal *models.Principal) middleware.Responder { + return middleware.NotImplemented("operation admin_api.TenantUpdatePools has not yet been implemented") }), AdminAPIUpdateGroupHandler: admin_api.UpdateGroupHandlerFunc(func(params admin_api.UpdateGroupParams, principal *models.Principal) middleware.Responder { return middleware.NotImplemented("operation admin_api.UpdateGroup has not yet been implemented") @@ -514,16 +514,16 @@ type ConsoleAPI struct { AdminAPISetPolicyMultipleHandler admin_api.SetPolicyMultipleHandler // UserAPIShareObjectHandler sets the operation handler for the share object operation UserAPIShareObjectHandler user_api.ShareObjectHandler - // AdminAPITenantAddZoneHandler sets the operation handler for the tenant add zone operation - AdminAPITenantAddZoneHandler admin_api.TenantAddZoneHandler + // AdminAPITenantAddPoolHandler sets the operation handler for the tenant add pool operation + AdminAPITenantAddPoolHandler admin_api.TenantAddPoolHandler // AdminAPITenantInfoHandler sets the operation handler for the tenant info operation AdminAPITenantInfoHandler admin_api.TenantInfoHandler // AdminAPITenantUpdateCertificateHandler sets the operation handler for the tenant update certificate operation AdminAPITenantUpdateCertificateHandler admin_api.TenantUpdateCertificateHandler // AdminAPITenantUpdateEncryptionHandler sets the operation handler for the tenant update encryption operation AdminAPITenantUpdateEncryptionHandler admin_api.TenantUpdateEncryptionHandler - // AdminAPITenantUpdateZonesHandler sets the operation handler for the tenant update zones operation - AdminAPITenantUpdateZonesHandler admin_api.TenantUpdateZonesHandler + // AdminAPITenantUpdatePoolsHandler sets the operation handler for the tenant update pools operation + AdminAPITenantUpdatePoolsHandler admin_api.TenantUpdatePoolsHandler // AdminAPIUpdateGroupHandler 
sets the operation handler for the update group operation AdminAPIUpdateGroupHandler admin_api.UpdateGroupHandler // AdminAPIUpdateTenantHandler sets the operation handler for the update tenant operation @@ -830,8 +830,8 @@ func (o *ConsoleAPI) Validate() error { if o.UserAPIShareObjectHandler == nil { unregistered = append(unregistered, "user_api.ShareObjectHandler") } - if o.AdminAPITenantAddZoneHandler == nil { - unregistered = append(unregistered, "admin_api.TenantAddZoneHandler") + if o.AdminAPITenantAddPoolHandler == nil { + unregistered = append(unregistered, "admin_api.TenantAddPoolHandler") } if o.AdminAPITenantInfoHandler == nil { unregistered = append(unregistered, "admin_api.TenantInfoHandler") @@ -842,8 +842,8 @@ func (o *ConsoleAPI) Validate() error { if o.AdminAPITenantUpdateEncryptionHandler == nil { unregistered = append(unregistered, "admin_api.TenantUpdateEncryptionHandler") } - if o.AdminAPITenantUpdateZonesHandler == nil { - unregistered = append(unregistered, "admin_api.TenantUpdateZonesHandler") + if o.AdminAPITenantUpdatePoolsHandler == nil { + unregistered = append(unregistered, "admin_api.TenantUpdatePoolsHandler") } if o.AdminAPIUpdateGroupHandler == nil { unregistered = append(unregistered, "admin_api.UpdateGroupHandler") @@ -1258,7 +1258,7 @@ func (o *ConsoleAPI) initHandlerCache() { if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } - o.handlers["POST"]["/namespaces/{namespace}/tenants/{tenant}/zones"] = admin_api.NewTenantAddZone(o.context, o.AdminAPITenantAddZoneHandler) + o.handlers["POST"]["/namespaces/{namespace}/tenants/{tenant}/pools"] = admin_api.NewTenantAddPool(o.context, o.AdminAPITenantAddPoolHandler) if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) } @@ -1274,7 +1274,7 @@ func (o *ConsoleAPI) initHandlerCache() { if o.handlers["PUT"] == nil { o.handlers["PUT"] = make(map[string]http.Handler) } - o.handlers["PUT"]["/namespaces/{namespace}/tenants/{tenant}/zones"] = admin_api.NewTenantUpdateZones(o.context, o.AdminAPITenantUpdateZonesHandler) + o.handlers["PUT"]["/namespaces/{namespace}/tenants/{tenant}/pools"] = admin_api.NewTenantUpdatePools(o.context, o.AdminAPITenantUpdatePoolsHandler) if o.handlers["PUT"] == nil { o.handlers["PUT"] = make(map[string]http.Handler) } diff --git a/restapi/server.go b/restapi/server.go index e848ecf78..27e7944bd 100644 --- a/restapi/server.go +++ b/restapi/server.go @@ -298,7 +298,7 @@ func (s *Server) Serve() (err error) { caCertPool := x509.NewCertPool() ok := caCertPool.AppendCertsFromPEM(caCert) if !ok { - return fmt.Errorf("unable to parse CA certificate %s", s.TLSCACertificate) + return fmt.Errorf("cannot parse CA certificate") } httpsServer.TLSConfig.ClientCAs = caCertPool httpsServer.TLSConfig.ClientAuth = tls.RequireAndVerifyClientCert diff --git a/swagger.yml b/swagger.yml index 7d6e361a3..419eb7a2c 100644 --- a/swagger.yml +++ b/swagger.yml @@ -236,7 +236,7 @@ paths: in: body required: true schema: - $ref: "#/definitions/putObjectRetentionRequest" + $ref: "#/definitions/putBucketRetentionRequest" responses: 200: description: A successful response. 
@@ -1715,10 +1715,10 @@ paths: tags: - AdminAPI - /namespaces/{namespace}/tenants/{tenant}/zones: + /namespaces/{namespace}/tenants/{tenant}/pools: post: - summary: Tenant Add Zone - operationId: TenantAddZone + summary: Tenant Add Pool + operationId: TenantAddPool parameters: - name: namespace in: path @@ -1732,7 +1732,7 @@ paths: in: body required: true schema: - $ref: "#/definitions/zone" + $ref: "#/definitions/pool" responses: 201: description: A successful response. @@ -1743,8 +1743,8 @@ paths: tags: - AdminAPI put: - summary: Tenant Update Zones - operationId: TenantUpdateZones + summary: Tenant Update Pools + operationId: TenantUpdatePools parameters: - name: namespace in: path @@ -1758,7 +1758,7 @@ paths: in: body required: true schema: - $ref: "#/definitions/zoneUpdateRequest" + $ref: "#/definitions/poolUpdateRequest" responses: 200: description: A successful response. @@ -2711,10 +2711,10 @@ definitions: type: string currentState: type: string - zones: + pools: type: array items: - $ref: "#/definitions/zone" + $ref: "#/definitions/pool" image: type: string console_image: @@ -2742,7 +2742,7 @@ definitions: properties: name: type: string - zone_count: + pool_count: type: integer instance_count: type: integer @@ -2807,7 +2807,7 @@ definitions: required: - name - namespace - - zones + - pools properties: name: type: string @@ -2816,10 +2816,10 @@ definitions: type: string console_image: type: string - zones: + pools: type: array items: - $ref: "#/definitions/zone" + $ref: "#/definitions/pool" mounth_path: type: string access_key: @@ -3093,7 +3093,7 @@ definitions: type: string secret_key: type: string - zone: + pool: type: object required: - servers @@ -3125,7 +3125,7 @@ definitions: additionalProperties: type: string resources: - $ref: "#/definitions/zoneResources" + $ref: "#/definitions/poolResources" node_selector: type: object additionalProperties: @@ -3135,11 +3135,11 @@ definitions: labels for the pod to be scheduled on that node. More info: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/" affinity: - $ref: "#/definitions/zoneAffinity" + $ref: "#/definitions/poolAffinity" tolerations: - $ref: "#/definitions/zoneTolerations" + $ref: "#/definitions/poolTolerations" - zoneTolerations: + poolTolerations: description: Tolerations allows users to set entries like effect, key, operator, value. items: @@ -3166,7 +3166,7 @@ definitions: category. type: string tolerationSeconds: - $ref: "#/definitions/zoneTolerationSeconds" + $ref: "#/definitions/poolTolerationSeconds" value: description: Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, @@ -3175,7 +3175,7 @@ definitions: type: object type: array - zoneTolerationSeconds: + poolTolerationSeconds: description: TolerationSeconds represents the period of time the toleration (which must be of effect NoExecute, otherwise this field is ignored) tolerates the taint. @@ -3190,7 +3190,7 @@ definitions: type: integer format: int64 - zoneResources: + poolResources: description: If provided, use these requests and limit for cpu/memory resource allocation properties: @@ -3213,7 +3213,7 @@ definitions: type: object type: object - zoneAffinity: + poolAffinity: description: If specified, affinity will define the pod's scheduling constraints properties: @@ -3275,7 +3275,7 @@ definitions: type: object podAffinity: description: Describes pod affinity scheduling rules (e.g. - co-locate this pod in the same node, zone, etc. as some + co-locate this pod in the same node, pool, etc. 
as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: @@ -3325,7 +3325,7 @@ definitions: type: object podAntiAffinity: description: Describes pod anti-affinity scheduling rules - (e.g. avoid putting this pod in the same node, zone, etc. + (e.g. avoid putting this pod in the same node, pool, etc. as some other pod(s)). properties: preferredDuringSchedulingIgnoredDuringExecution: @@ -3564,15 +3564,15 @@ definitions: delete_pvcs: type: boolean - zoneUpdateRequest: + poolUpdateRequest: type: object required: - - zones + - pools properties: - zones: + pools: type: array items: - $ref: "#/definitions/zone" + $ref: "#/definitions/pool" maxAllocatableMemResponse: type: object