Describe pod API (#1861)
@@ -44,11 +44,13 @@ import (
	"github.com/minio/console/pkg/auth/utils"

	"k8s.io/apimachinery/pkg/api/resource"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/serializer"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/duration"
	"k8s.io/apimachinery/pkg/util/sets"
	"k8s.io/utils/strings/slices"

	corev1 "k8s.io/api/core/v1"
@@ -256,6 +258,14 @@ func registerTenantHandlers(api *operations.OperatorAPI) {
		return operator_api.NewGetPodEventsOK().WithPayload(payload)
	})

	api.OperatorAPIDescribePodHandler = operator_api.DescribePodHandlerFunc(func(params operator_api.DescribePodParams, session *models.Principal) middleware.Responder {
		payload, err := getDescribePodResponse(session, params)
		if err != nil {
			return operator_api.NewDescribePodDefault(int(err.Code)).WithPayload(err)
		}
		return operator_api.NewDescribePodOK().WithPayload(payload)
	})

	// Get tenant monitoring info
	api.OperatorAPIGetTenantMonitoringHandler = operator_api.GetTenantMonitoringHandlerFunc(func(params operator_api.GetTenantMonitoringParams, session *models.Principal) middleware.Responder {
		payload, err := getTenantMonitoringResponse(session, params)
@@ -1800,6 +1810,289 @@ func getPodEventsResponse(session *models.Principal, params operator_api.GetPodE
	return retval, nil
}

func getDescribePodResponse(session *models.Principal, params operator_api.DescribePodParams) (*models.DescribePodWrapper, *models.Error) {
	ctx := context.Background()
	clientset, err := cluster.K8sClient(session.STSSessionToken)
	if err != nil {
		return nil, restapi.ErrorWithContext(ctx, err)
	}
	pod, err := clientset.CoreV1().Pods(params.Namespace).Get(ctx, params.PodName, metav1.GetOptions{})
	if err != nil {
		return nil, restapi.ErrorWithContext(ctx, err)
	}
	retval := &models.DescribePodWrapper{
		Name:              pod.Name,
		Namespace:         pod.Namespace,
		PriorityClassName: pod.Spec.PriorityClassName,
		NodeName:          pod.Spec.NodeName,
	}
	if pod.Spec.Priority != nil {
		retval.Priority = int64(*pod.Spec.Priority)
	}
	if pod.Status.StartTime != nil {
		retval.StartTime = pod.Status.StartTime.Time.String()
	}
	labelArray := make([]*models.Label, len(pod.Labels))
	i := 0
	for key := range pod.Labels {
		labelArray[i] = &models.Label{Key: key, Value: pod.Labels[key]}
		i++
	}
	retval.Labels = labelArray
	annotationArray := make([]*models.Annotation, len(pod.Annotations))
	i = 0
	for key := range pod.Annotations {
		annotationArray[i] = &models.Annotation{Key: key, Value: pod.Annotations[key]}
		i++
	}
	retval.Annotations = annotationArray
	if pod.DeletionTimestamp != nil {
		retval.DeletionTimestamp = translateTimestampSince(*pod.DeletionTimestamp)
		if pod.DeletionGracePeriodSeconds != nil {
			retval.DeletionGracePeriodSeconds = *pod.DeletionGracePeriodSeconds
		}
	}
	retval.Phase = string(pod.Status.Phase)
	retval.Reason = pod.Status.Reason
	retval.Message = pod.Status.Message
	retval.PodIP = pod.Status.PodIP
	// GetControllerOf returns nil for pods without a controller, so guard
	// before dereferencing
	if controllerRef := metav1.GetControllerOf(pod); controllerRef != nil {
		retval.ControllerRef = controllerRef.String()
	}
	retval.Containers = make([]*models.Container, len(pod.Spec.Containers))
	statusMap := map[string]corev1.ContainerStatus{}
	statusKeys := make([]string, len(pod.Status.ContainerStatuses))
	for i, status := range pod.Status.ContainerStatuses {
		statusMap[status.Name] = status
		statusKeys[i] = status.Name
	}
	for i := range pod.Spec.Containers {
		retval.Containers[i] = &models.Container{
			Name:      pod.Spec.Containers[i].Name,
			Image:     pod.Spec.Containers[i].Image,
			Ports:     describeContainerPorts(pod.Spec.Containers[i].Ports),
			HostPorts: describeContainerHostPorts(pod.Spec.Containers[i].Ports),
			Args:      pod.Spec.Containers[i].Args,
		}
		if slices.Contains(statusKeys, pod.Spec.Containers[i].Name) {
			retval.Containers[i].ContainerID = statusMap[pod.Spec.Containers[i].Name].ContainerID
			retval.Containers[i].ImageID = statusMap[pod.Spec.Containers[i].Name].ImageID
			retval.Containers[i].Ready = statusMap[pod.Spec.Containers[i].Name].Ready
			retval.Containers[i].RestartCount = int64(statusMap[pod.Spec.Containers[i].Name].RestartCount)
			retval.Containers[i].State, retval.Containers[i].LastState = describeStatus(statusMap[pod.Spec.Containers[i].Name])
		}
		// copy this container's environment variables
		retval.Containers[i].EnvironmentVariables = make([]*models.EnvironmentVariable, len(pod.Spec.Containers[i].Env))
		for j := range pod.Spec.Containers[i].Env {
			retval.Containers[i].EnvironmentVariables[j] = &models.EnvironmentVariable{
				Key:   pod.Spec.Containers[i].Env[j].Name,
				Value: pod.Spec.Containers[i].Env[j].Value,
			}
		}
		retval.Containers[i].Mounts = make([]*models.Mount, len(pod.Spec.Containers[i].VolumeMounts))
		for j := range pod.Spec.Containers[i].VolumeMounts {
			retval.Containers[i].Mounts[j] = &models.Mount{
				Name:      pod.Spec.Containers[i].VolumeMounts[j].Name,
				MountPath: pod.Spec.Containers[i].VolumeMounts[j].MountPath,
				SubPath:   pod.Spec.Containers[i].VolumeMounts[j].SubPath,
				ReadOnly:  pod.Spec.Containers[i].VolumeMounts[j].ReadOnly,
			}
		}
	}
	retval.Conditions = make([]*models.Condition, len(pod.Status.Conditions))
	for i := range pod.Status.Conditions {
		retval.Conditions[i] = &models.Condition{
			Type:   string(pod.Status.Conditions[i].Type),
			Status: string(pod.Status.Conditions[i].Status),
		}
	}
	retval.Volumes = make([]*models.Volume, len(pod.Spec.Volumes))
	for i := range pod.Spec.Volumes {
		retval.Volumes[i] = &models.Volume{
			Name: pod.Spec.Volumes[i].Name,
		}
		if pod.Spec.Volumes[i].PersistentVolumeClaim != nil {
			retval.Volumes[i].Pvc = &models.Pvc{
				ReadOnly:  pod.Spec.Volumes[i].PersistentVolumeClaim.ReadOnly,
				ClaimName: pod.Spec.Volumes[i].PersistentVolumeClaim.ClaimName,
			}
		} else if pod.Spec.Volumes[i].Projected != nil {
			retval.Volumes[i].Projected = &models.ProjectedVolume{}
			retval.Volumes[i].Projected.Sources = make([]*models.ProjectedVolumeSource, len(pod.Spec.Volumes[i].Projected.Sources))
			for j := range pod.Spec.Volumes[i].Projected.Sources {
				retval.Volumes[i].Projected.Sources[j] = &models.ProjectedVolumeSource{}
				if pod.Spec.Volumes[i].Projected.Sources[j].Secret != nil {
					retval.Volumes[i].Projected.Sources[j].Secret = &models.Secret{
						Name:     pod.Spec.Volumes[i].Projected.Sources[j].Secret.Name,
						Optional: pod.Spec.Volumes[i].Projected.Sources[j].Secret.Optional != nil,
					}
				}
				if pod.Spec.Volumes[i].Projected.Sources[j].DownwardAPI != nil {
					retval.Volumes[i].Projected.Sources[j].DownwardAPI = true
				}
				if pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap != nil {
					retval.Volumes[i].Projected.Sources[j].ConfigMap = &models.ConfigMap{
						Name:     pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap.Name,
						Optional: pod.Spec.Volumes[i].Projected.Sources[j].ConfigMap.Optional != nil,
					}
				}
				// ExpirationSeconds is a *int64 and may be unset in the spec
				if pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken != nil && pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken.ExpirationSeconds != nil {
					retval.Volumes[i].Projected.Sources[j].ServiceAccountToken =
						&models.ServiceAccountToken{ExpirationSeconds: *pod.Spec.Volumes[i].Projected.Sources[j].ServiceAccountToken.ExpirationSeconds}
				}
			}
		}
	}
	retval.QosClass = string(getPodQOS(pod))
	nodeSelectorArray := make([]*models.NodeSelector, len(pod.Spec.NodeSelector))
	i = 0
	for key := range pod.Spec.NodeSelector {
		nodeSelectorArray[i] = &models.NodeSelector{Key: key, Value: pod.Spec.NodeSelector[key]}
		i++
	}
	retval.NodeSelector = nodeSelectorArray
	retval.Tolerations = make([]*models.Toleration, len(pod.Spec.Tolerations))
	for i := range pod.Spec.Tolerations {
		retval.Tolerations[i] = &models.Toleration{
			Effect:   string(pod.Spec.Tolerations[i].Effect),
			Key:      pod.Spec.Tolerations[i].Key,
			Value:    pod.Spec.Tolerations[i].Value,
			Operator: string(pod.Spec.Tolerations[i].Operator),
		}
		// TolerationSeconds is a *int64 and is nil for most tolerations,
		// so guard before dereferencing
		if pod.Spec.Tolerations[i].TolerationSeconds != nil {
			retval.Tolerations[i].TolerationSeconds = *pod.Spec.Tolerations[i].TolerationSeconds
		}
	}
	return retval, nil
}
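
The label, annotation, and node-selector conversions above all flatten a string map into a slice of generated key/value models. A hypothetical generic helper (not part of this commit; assumes Go 1.18+, and mapToSlice is an invented name) could express that shape once:

// Hypothetical helper, not in the commit: flatten a string map into a slice
// using a per-type constructor.
func mapToSlice[T any](m map[string]string, newT func(key, value string) T) []T {
	out := make([]T, 0, len(m))
	for k, v := range m {
		out = append(out, newT(k, v))
	}
	return out
}

// Usage sketch:
//	retval.Labels = mapToSlice(pod.Labels, func(k, v string) *models.Label {
//		return &models.Label{Key: k, Value: v}
//	})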

func describeStatus(status corev1.ContainerStatus) (*models.State, *models.State) {
	retval := &models.State{}
	last := &models.State{}
	state := status.State
	lastState := status.LastTerminationState
	switch {
	case state.Running != nil:
		retval.State = "Running"
		retval.Started = state.Running.StartedAt.Time.Format(time.RFC1123Z)
	case state.Waiting != nil:
		retval.State = "Waiting"
		retval.Reason = state.Waiting.Reason
	case state.Terminated != nil:
		retval.State = "Terminated"
		retval.Message = state.Terminated.Message
		retval.ExitCode = int64(state.Terminated.ExitCode)
		retval.Signal = int64(state.Terminated.Signal)
		retval.Started = state.Terminated.StartedAt.Time.Format(time.RFC1123Z)
		retval.Finished = state.Terminated.FinishedAt.Time.Format(time.RFC1123Z)
		switch {
		case lastState.Running != nil:
			last.State = "Running"
			last.Started = lastState.Running.StartedAt.Time.Format(time.RFC1123Z)
		case lastState.Waiting != nil:
			last.State = "Waiting"
			last.Reason = lastState.Waiting.Reason
		case lastState.Terminated != nil:
			last.State = "Terminated"
			last.Message = lastState.Terminated.Message
			last.ExitCode = int64(lastState.Terminated.ExitCode)
			last.Signal = int64(lastState.Terminated.Signal)
			last.Started = lastState.Terminated.StartedAt.Time.Format(time.RFC1123Z)
			last.Finished = lastState.Terminated.FinishedAt.Time.Format(time.RFC1123Z)
		default:
			last.State = "Waiting"
		}
	default:
		retval.State = "Waiting"
	}
	return retval, last
}
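
For illustration only (not part of the commit), a minimal sketch of how describeStatus maps a corev1.ContainerStatus to the current/last state pair; the status values are invented. Note that the last termination state is only inspected when the current state is Terminated, mirroring the nested switch above:

func exampleDescribeStatus() {
	// Invented status: container crashed and was previously in CrashLoopBackOff
	status := corev1.ContainerStatus{
		Name: "minio",
		State: corev1.ContainerState{
			Terminated: &corev1.ContainerStateTerminated{
				ExitCode:   1,
				StartedAt:  metav1.Now(),
				FinishedAt: metav1.Now(),
			},
		},
		LastTerminationState: corev1.ContainerState{
			Waiting: &corev1.ContainerStateWaiting{Reason: "CrashLoopBackOff"},
		},
	}
	current, last := describeStatus(status)
	fmt.Println(current.State, current.ExitCode) // Terminated 1
	fmt.Println(last.State, last.Reason)         // Waiting CrashLoopBackOff
}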

func describeContainerPorts(cPorts []corev1.ContainerPort) []string {
	ports := make([]string, 0, len(cPorts))
	for _, cPort := range cPorts {
		ports = append(ports, fmt.Sprintf("%d/%s", cPort.ContainerPort, cPort.Protocol))
	}
	return ports
}

func describeContainerHostPorts(cPorts []corev1.ContainerPort) []string {
	ports := make([]string, 0, len(cPorts))
	for _, cPort := range cPorts {
		ports = append(ports, fmt.Sprintf("%d/%s", cPort.HostPort, cPort.Protocol))
	}
	return ports
}
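
A quick illustrative call (not in the commit): both formatters render port/protocol pairs, so a container port of 9000/TCP comes back as "9000/TCP". An unset HostPort renders as "0/TCP", since the zero value is formatted as-is:

func exampleContainerPorts() {
	cPorts := []corev1.ContainerPort{{ContainerPort: 9000, Protocol: corev1.ProtocolTCP}}
	fmt.Println(describeContainerPorts(cPorts))     // [9000/TCP]
	fmt.Println(describeContainerHostPorts(cPorts)) // [0/TCP] (HostPort unset)
}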

func getPodQOS(pod *corev1.Pod) corev1.PodQOSClass {
	requests := corev1.ResourceList{}
	limits := corev1.ResourceList{}
	zeroQuantity := resource.MustParse("0")
	isGuaranteed := true
	allContainers := []corev1.Container{}
	allContainers = append(allContainers, pod.Spec.Containers...)
	allContainers = append(allContainers, pod.Spec.InitContainers...)
	for _, container := range allContainers {
		// process requests
		for name, quantity := range container.Resources.Requests {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				delta := quantity.DeepCopy()
				if _, exists := requests[name]; !exists {
					requests[name] = delta
				} else {
					delta.Add(requests[name])
					requests[name] = delta
				}
			}
		}
		// process limits
		qosLimitsFound := sets.NewString()
		for name, quantity := range container.Resources.Limits {
			if !isSupportedQoSComputeResource(name) {
				continue
			}
			if quantity.Cmp(zeroQuantity) == 1 {
				qosLimitsFound.Insert(string(name))
				delta := quantity.DeepCopy()
				if _, exists := limits[name]; !exists {
					limits[name] = delta
				} else {
					delta.Add(limits[name])
					limits[name] = delta
				}
			}
		}

		if !qosLimitsFound.HasAll(string(corev1.ResourceMemory), string(corev1.ResourceCPU)) {
			isGuaranteed = false
		}
	}
	if len(requests) == 0 && len(limits) == 0 {
		return corev1.PodQOSBestEffort
	}
	// Check if requests match limits for all resources.
	if isGuaranteed {
		for name, req := range requests {
			if lim, exists := limits[name]; !exists || lim.Cmp(req) != 0 {
				isGuaranteed = false
				break
			}
		}
	}
	if isGuaranteed && len(requests) == len(limits) {
		return corev1.PodQOSGuaranteed
	}
	return corev1.PodQOSBurstable
}

var supportedQoSComputeResources = sets.NewString(string(corev1.ResourceCPU), string(corev1.ResourceMemory))

func isSupportedQoSComputeResource(name corev1.ResourceName) bool {
	return supportedQoSComputeResources.Has(string(name))
}
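
An illustrative sketch (not part of the commit) of the QoS classification: a container with equal, fully specified CPU and memory requests and limits yields Guaranteed; dropping the limits would make it Burstable, and specifying no resources at all, BestEffort:

func exampleGetPodQOS() {
	res := corev1.ResourceList{
		corev1.ResourceCPU:    resource.MustParse("500m"),
		corev1.ResourceMemory: resource.MustParse("1Gi"),
	}
	pod := &corev1.Pod{
		Spec: corev1.PodSpec{
			Containers: []corev1.Container{{
				Name:      "minio",
				Resources: corev1.ResourceRequirements{Requests: res, Limits: res},
			}},
		},
	}
	fmt.Println(getPodQOS(pod)) // Guaranteed
}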

func translateTimestampSince(timestamp metav1.Time) string {
	if timestamp.IsZero() {
		return "<unknown>"
	}

	return duration.HumanDuration(time.Since(timestamp.Time))
}
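
For example (illustrative, not in the commit), a deletion timestamp 90 seconds in the past renders as "90s":

func exampleTranslateTimestamp() {
	created := metav1.NewTime(time.Now().Add(-90 * time.Second))
	fmt.Println(translateTimestampSince(created)) // 90s
}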

// Get values for Prometheus metrics
func getTenantMonitoringResponse(session *models.Principal, params operator_api.GetTenantMonitoringParams) (*models.TenantMonitoringInfo, *models.Error) {
	ctx, cancel := context.WithCancel(params.HTTPRequest.Context())