1
0
mirror of https://github.com/google/nomulus synced 2025-12-23 06:15:42 +00:00

Update resource allocation for all Nomulus GKE deployments (#2796)

All deployments received an update to the averageUtilization CPU target. This should allow us to stay ahead of the traffic curve and create instances before CPU reaches the limit.
Frontend CPU allocation has caused a "noisy neighbor" problem, with pods assigned to nodes where there's not enough bursting capacity, so I increased it.
Adjusted the rest of the deployments according to their utilization.
This commit is contained in:
Pavlo Tkach
2025-08-08 13:55:08 -04:00
committed by GitHub
parent 427f6db820
commit 18614ba11e
4 changed files with 25 additions and 16 deletions

View File

@@ -25,8 +25,11 @@ spec:
name: http
resources:
requests:
cpu: "100m"
memory: "512Mi"
cpu: "500m"
memory: "1Gi"
limits:
cpu: "1000m"
memory: "1Gi"
args: [ENVIRONMENT]
env:
- name: POD_ID
@@ -61,7 +64,7 @@ spec:
name: cpu
target:
type: Utilization
averageUtilization: 100
averageUtilization: 80
---
apiVersion: v1
kind: Service

View File

@@ -41,7 +41,7 @@ spec:
# class from performance, which has implicit pod-slots 1
cloud.google.com/pod-slots: 0
cpu: "500m"
memory: "2Gi"
memory: "1Gi"
args: [ENVIRONMENT]
env:
- name: POD_ID
@@ -76,7 +76,7 @@ spec:
name: cpu
target:
type: Utilization
averageUtilization: 100
averageUtilization: 80
---
apiVersion: v1
kind: Service

View File

@@ -25,8 +25,11 @@ spec:
name: http
resources:
requests:
cpu: "100m"
cpu: "1000m"
memory: "1Gi"
limits:
cpu: "1000m"
memory: "2Gi"
args: [ENVIRONMENT]
env:
- name: POD_ID
@@ -50,7 +53,10 @@ spec:
name: epp
resources:
requests:
cpu: "100m"
cpu: "1000m"
memory: "512Mi"
limits:
cpu: "1000m"
memory: "512Mi"
args: [--env, PROXY_ENV, --log, --local]
env:
@@ -90,7 +96,7 @@ spec:
name: cpu
target:
type: Utilization
averageUtilization: 100
averageUtilization: 80
---
apiVersion: v1
kind: Service

View File

@@ -34,13 +34,13 @@ spec:
# explicit pod-slots 0 is required in order to downgrade node
# class from performance, which has implicit pod-slots 1
cloud.google.com/pod-slots: 0
cpu: "100m"
cpu: "500m"
memory: "1Gi"
limits:
# explicit pod-slots 0 is required in order to downgrade node
# class from performance, which has implicit pod-slots 1
cloud.google.com/pod-slots: 0
cpu: "500m"
cpu: "1000m"
memory: "2Gi"
args: [ENVIRONMENT]
env:
@@ -76,7 +76,7 @@ spec:
name: cpu
target:
type: Utilization
averageUtilization: 100
averageUtilization: 80
---
apiVersion: v1
kind: Service