jq for Kubernetes

Kubernetes outputs deeply nested JSON. Master these patterns to extract exactly what you need from kubectl.


The Basics

Always use -o json to get machine-parseable output:

# Pretty-print the full List response (identity filter '.')
kubectl get pods -o json | jq '.'

Understanding the Structure

Listing resources with kubectl get returns this List structure (fetching a single named resource, e.g. kubectl get pod mypod, returns the object itself, with no .items wrapper):

{
  "apiVersion": "v1",
  "kind": "List",
  "items": [
    { /* resource 1 */ },
    { /* resource 2 */ }
  ]
}

So you always start with .items[] to iterate resources.


Pod Operations

List Pod Names

# All namespaces (-r emits raw strings instead of JSON-quoted ones)
kubectl get pods -A -o json | jq -r '.items[].metadata.name'

# Specific namespace
kubectl get pods -n wazuh -o json | jq -r '.items[].metadata.name'

# With namespace prefix (ns/name — disambiguates -A output)
kubectl get pods -A -o json | jq -r '.items[] | "\(.metadata.namespace)/\(.metadata.name)"'

Pod Status Analysis

# Simple status (phase only; "Running" does not imply containers are ready)
kubectl get pods -A -o json | jq -r '
  .items[] | [.metadata.namespace, .metadata.name, .status.phase] | @tsv
' | column -t

# Detailed status with conditions
# - ready: the "True"/"False" status string of the pod's Ready condition
# - restarts: sum across containers; null when containerStatuses is absent
#   (e.g. Pending pods), since `add` of an empty array is null
kubectl get pods -A -o json | jq '.items[] | {
  name: .metadata.name,
  namespace: .metadata.namespace,
  phase: .status.phase,
  ready: ([.status.conditions[] | select(.type == "Ready") | .status] | first),
  restarts: ([.status.containerStatuses[]?.restartCount] | add)
}'

Find Problem Pods

# Not Running (note: also lists Succeeded pods from completed Jobs)
kubectl get pods -A -o json | jq -r '
  .items[] | select(.status.phase != "Running") |
  [.metadata.namespace, .metadata.name, .status.phase] | @tsv
' | column -t

# Pods with restarts > 5
# `add` of an empty array is null and null > 5 is false, so pods
# without containerStatuses are skipped cleanly.
# NOTE: column -N is util-linux; BSD/macOS column does not support it.
kubectl get pods -A -o json | jq -r '
  .items[] |
  ([.status.containerStatuses[]?.restartCount] | add) as $restarts |
  select($restarts > 5) |
  [.metadata.namespace, .metadata.name, $restarts] | @tsv
' | column -t -N "NAMESPACE,POD,RESTARTS"

# Pods in CrashLoopBackOff
# any(...) prints each pod exactly once even when several of its
# containers are crashing (a bare select over containerStatuses[]
# would emit one line per crashing container).
kubectl get pods -A -o json | jq -r '
  .items[] |
  select(any(.status.containerStatuses[]?; .state.waiting?.reason == "CrashLoopBackOff")) |
  [.metadata.namespace, .metadata.name] | @tsv
'

# Pending pods (scheduling issues)
# conditions[0] is normally PodScheduled for Pending pods — verify on your cluster
kubectl get pods -A -o json | jq '
  .items[] | select(.status.phase == "Pending") |
  {
    name: .metadata.name,
    namespace: .metadata.namespace,
    reason: .status.conditions[0].reason,
    message: .status.conditions[0].message
  }
'

Container Images

# All images in use
# (spec.containers only; initContainers/ephemeralContainers are not included)
kubectl get pods -A -o json | jq -r '
  [.items[].spec.containers[].image] | unique[]
'

# Images by namespace
# First jq emits one {namespace, images} object per pod; the second
# slurps (-s) that stream into an array and groups it.
kubectl get pods -A -o json | jq -r '
  .items[] | {
    namespace: .metadata.namespace,
    images: [.spec.containers[].image]
  }
' | jq -s 'group_by(.namespace) | map({
  namespace: .[0].namespace,
  images: [.[].images[]] | unique
})'

# Find pods using specific image
# any(...) prints each pod once even if several containers match
kubectl get pods -A -o json | jq -r --arg img "nginx" '
  .items[] | select(any(.spec.containers[].image; contains($img))) |
  [.metadata.namespace, .metadata.name] | @tsv
'

Resource Analysis

CPU and Memory Requests/Limits

# Full resource breakdown (one row per container; "none" when unset)
kubectl get pods -A -o json | jq -r '
  .items[] |
  .spec.containers[] as $c |
  [
    .metadata.namespace,
    .metadata.name,
    $c.name,
    ($c.resources.requests.cpu // "none"),
    ($c.resources.limits.cpu // "none"),
    ($c.resources.requests.memory // "none"),
    ($c.resources.limits.memory // "none")
  ] | @tsv
' | column -t -N "NS,POD,CONTAINER,CPU_REQ,CPU_LIM,MEM_REQ,MEM_LIM"

# Pods without resource limits (bad practice)
# any(...) lists each pod once; a bare select over containers[] would
# repeat a pod once per limit-less container.
kubectl get pods -A -o json | jq -r '
  .items[] |
  select(any(.spec.containers[]; .resources.limits == null)) |
  [.metadata.namespace, .metadata.name] | @tsv
' | column -t

Node Resource Pressure

# Node capacity vs allocatable
# allocatable is what pods may actually request (typically capacity
# minus system/kube reserved resources)
kubectl get nodes -o json | jq '.items[] | {
  name: .metadata.name,
  capacity: {
    cpu: .status.capacity.cpu,
    memory: .status.capacity.memory
  },
  allocatable: {
    cpu: .status.allocatable.cpu,
    memory: .status.allocatable.memory
  }
}'

# Node conditions
# Healthy nodes: Ready=True; pressure conditions (MemoryPressure,
# DiskPressure, PIDPressure) should be False
kubectl get nodes -o json | jq '.items[] | {
  name: .metadata.name,
  conditions: [.status.conditions[] | {type, status}]
}'

Services and Networking

Service Endpoints

# Services with their endpoints
# subsets[]? / addresses[]? tolerate Endpoints objects with no ready addresses
kubectl get endpoints -A -o json | jq '.items[] | {
  namespace: .metadata.namespace,
  service: .metadata.name,
  endpoints: [.subsets[]?.addresses[]?.ip] | unique
}'

# Services without endpoints (broken)
# Usually means the Service selector matches no ready pods
kubectl get endpoints -A -o json | jq '
  .items[] | select(.subsets == null or (.subsets | length) == 0) |
  {namespace: .metadata.namespace, service: .metadata.name}
'

Ingress Routes

# All ingress rules
# NOTE(review): .backend.service.port.number is empty for named ports
# (port.name) — extend the filter if your ingresses use named ports.
kubectl get ingress -A -o json | jq -r '
  .items[] |
  .metadata.namespace as $ns |
  .spec.rules[]? |
  .host as $host |
  .http.paths[]? |
  [$ns, $host, .path, .backend.service.name, .backend.service.port.number] | @tsv
' | column -t -N "NS,HOST,PATH,SERVICE,PORT"

Events and Debugging

Recent Events

# Last 20 events
# --sort-by orders oldest->newest, so .items[-20:] are the newest 20 and
# reverse prints newest first. lastTimestamp and message can be null
# (newer Events populate eventTime/series instead), so guard with //
# before split/slice to avoid a jq error.
kubectl get events -A --sort-by='.lastTimestamp' -o json | jq -r '
  .items[-20:] | reverse | .[] |
  [
    ((.lastTimestamp // "") | split("T") | (.[1] // "") | .[:8]),
    (.involvedObject.namespace // "-"),
    .involvedObject.name,
    .type,
    .reason,
    ((.message // "")[:60])
  ] | @tsv
' | column -t -s$'\t'

# Warning events only
kubectl get events -A -o json | jq '
  .items[] | select(.type == "Warning") |
  {
    namespace: .involvedObject.namespace,
    object: .involvedObject.name,
    reason,
    message,
    count,
    last: .lastTimestamp
  }
'

# Events for specific pod
kubectl get events -n wazuh -o json | jq --arg pod "wazuh-manager-0" '
  .items[] | select(.involvedObject.name == $pod) |
  {reason, message, count, lastTimestamp}
'

Pod Logs Context

# Get pod info before diving into logs
# containerStatuses[]? tolerates pods that have no statuses yet
# (e.g. Pending) — a bare [] would error "Cannot iterate over null"
kubectl get pod mypod -o json | jq '{
  name: .metadata.name,
  containers: [.spec.containers[].name],
  restarts: [.status.containerStatuses[]? | {name, restartCount}],
  started: .status.startTime,
  phase: .status.phase
}'

Deployments and Scaling

Deployment Status

# Deployment health
# readyReplicas/availableReplicas are omitted by the API when 0;
# @tsv renders null as an empty field, which may misalign column -t
kubectl get deployments -A -o json | jq -r '
  .items[] |
  [
    .metadata.namespace,
    .metadata.name,
    .spec.replicas,
    .status.readyReplicas,
    .status.availableReplicas
  ] | @tsv
' | column -t -N "NS,DEPLOYMENT,DESIRED,READY,AVAILABLE"

# Deployments not fully available
# (a missing availableReplicas compares as null != replicas, so fully
# unavailable deployments are also caught)
kubectl get deployments -A -o json | jq '
  .items[] |
  select(.spec.replicas != .status.availableReplicas) |
  {
    namespace: .metadata.namespace,
    name: .metadata.name,
    desired: .spec.replicas,
    available: .status.availableReplicas
  }
'

Rollout Status

# Deployment conditions (Available / Progressing, with reasons)
kubectl get deployment myapp -o json | jq '
  .status.conditions[] | {type, status, reason, message}
'

# Replica sets for deployment
# Matches ReplicaSets whose ownerReference names the deployment; the
# revision annotation identifies which rollout produced each RS
kubectl get rs -o json | jq --arg deploy "myapp" '
  .items[] |
  select(.metadata.ownerReferences[]?.name == $deploy) |
  {
    name: .metadata.name,
    desired: .spec.replicas,
    ready: .status.readyReplicas,
    revision: .metadata.annotations["deployment.kubernetes.io/revision"]
  }
'

Secrets and ConfigMaps

List Secrets by Type

# Group by type
kubectl get secrets -A -o json | jq '
  [.items[] | {namespace: .metadata.namespace, name: .metadata.name, type}] |
  group_by(.type) |
  map({type: .[0].type, count: length})
'

# TLS secrets expiring soon (if cert-manager)
# Note: actual cert expiry parsing needs openssl; jq only lists the
# TLS secrets here. (No trailing comma after the last object field —
# jq rejects trailing commas with a syntax error.)
kubectl get secrets -A -o json | jq '
  .items[] | select(.type == "kubernetes.io/tls") |
  {
    namespace: .metadata.namespace,
    name: .metadata.name
  }
'

ConfigMap Keys

# List all keys in configmaps
# (.data // {}) guards configmaps that have no data section at all
kubectl get configmaps -A -o json | jq -r '
  .items[] |
  .metadata.namespace as $ns |
  .metadata.name as $name |
  (.data // {}) | keys[] |
  [$ns, $name, .] | @tsv
' | column -t -N "NS,CONFIGMAP,KEY"

Labels and Selectors

Find by Label

# Pods with specific label
# -r is required so @tsv emits raw tab-separated text rather than a
# JSON-quoted string with literal \t escapes
kubectl get pods -A -o json | jq -r --arg app "wazuh" '
  .items[] | select(.metadata.labels.app == $app) |
  [.metadata.namespace, .metadata.name] | @tsv
'

# All unique label keys
# // {} guards pods that carry no labels (keys on null is a jq error)
kubectl get pods -A -o json | jq '
  [.items[].metadata.labels // {} | keys] | flatten | unique
'

# Pods missing required label
# (a null .metadata.labels also yields null .team, so fully
# label-less pods are caught too)
kubectl get pods -A -o json | jq '
  .items[] | select(.metadata.labels.team == null) |
  {namespace: .metadata.namespace, name: .metadata.name}
'

Helm and ArgoCD

Helm Releases

# List Helm releases (via secrets)
# NOTE(review): assumes the label keys (name/version/status) set by
# Helm's secret storage driver — confirm against your Helm version
kubectl get secrets -A -o json | jq '
  .items[] | select(.type == "helm.sh/release.v1") |
  {
    namespace: .metadata.namespace,
    release: .metadata.labels.name,
    version: .metadata.labels.version,
    status: .metadata.labels.status
  }
'

ArgoCD Applications

# ArgoCD app status (Application CRs live in the argocd namespace)
kubectl get applications -n argocd -o json | jq '.items[] | {
  name: .metadata.name,
  sync: .status.sync.status,
  health: .status.health.status,
  repo: .spec.source.repoURL,
  path: .spec.source.path
}'

# Out-of-sync apps
# (!= "Synced" also catches apps with a null/unknown sync status)
kubectl get applications -n argocd -o json | jq '
  .items[] | select(.status.sync.status != "Synced") |
  {name: .metadata.name, status: .status.sync.status}
'

Cluster-Wide Analysis

Resource Inventory

# Count resources by type
# "$kind" is quoted and lowercase per shell convention; printf avoids
# echo's portability quirks
for kind in pods deployments services configmaps secrets; do
  count=$(kubectl get "$kind" -A -o json | jq '.items | length')
  printf '%s: %s\n' "$kind" "$count"
done

# Namespaces with resource counts (busiest namespaces first)
kubectl get pods -A -o json | jq '
  [.items[] | .metadata.namespace] |
  group_by(.) |
  map({namespace: .[0], pods: length}) |
  sort_by(.pods) | reverse
'

RBAC Analysis

# ClusterRoleBindings
# subjects[]? tolerates bindings that have no subjects
kubectl get clusterrolebindings -o json | jq '.items[] | {
  name: .metadata.name,
  role: .roleRef.name,
  subjects: [.subjects[]? | "\(.kind)/\(.name)"]
}'

# ServiceAccounts (and users/groups) bound to cluster-admin
kubectl get clusterrolebindings -o json | jq '
  .items[] | select(.roleRef.name == "cluster-admin") |
  {binding: .metadata.name, subjects: .subjects}
'

Performance Patterns

Use JSONPath When Simpler

For simple extractions, kubectl’s built-in JSONPath is faster:

# JSONPath (faster for simple queries — no extra process to spawn)
kubectl get pods -o jsonpath='{.items[*].metadata.name}'

# jq (better for complex transformations: select, group_by, @tsv, ...)
kubectl get pods -o json | jq -r '.items[].metadata.name'

Reduce API Calls

# BAD: Multiple API calls (one kubectl round-trip per pod)
kubectl get pods -o json | jq -r '.items[].metadata.name' | while read pod; do
  kubectl get pod "$pod" -o json | jq '.status.phase'
done

# GOOD: Single API call — fetch once, extract everything with jq
kubectl get pods -o json | jq -r '.items[] | [.metadata.name, .status.phase] | @tsv'

yq for Kubernetes Manifests

Use yq (YAML jq) to audit and validate Kubernetes manifests before deployment.

Security Auditing

# Find containers running as root
# (.spec.template.spec // .spec) resolves the pod spec for both
# Deployments (templated) and bare Pods — the template path alone would
# silently match nothing for kind: Pod even though the select allows it.
find manifests/ -name "*.yaml" -exec yq e '
  select(.kind == "Deployment" or .kind == "Pod") |
  (.spec.template.spec // .spec).containers[] |
  select(.securityContext.runAsUser == 0 or .securityContext.runAsNonRoot == false) |
  {"file": filename, "container": .name, "issue": "runs as root"}
' {} \;

# Find privileged containers
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.securityContext.privileged == true) |
  {"container": .name, "privileged": true}
' deployment.yaml

# Check for missing security contexts
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.securityContext == null) |
  {"container": .name, "issue": "no securityContext defined"}
' deployment.yaml

# Find hostPath volumes (security risk)
# Same (.spec.template.spec // .spec) fallback so kind: Pod documents
# are actually inspected
yq e '
  select(.kind == "Deployment" or .kind == "Pod") |
  select((.spec.template.spec // .spec).volumes[].hostPath != null) |
  {"name": .metadata.name, "hostPaths": [(.spec.template.spec // .spec).volumes[] | select(.hostPath) | .hostPath.path]}
' deployment.yaml

Resource Limit Auditing

# Containers without resource limits
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.resources.limits == null) |
  {"container": .name, "issue": "no resource limits"}
' deployment.yaml

# Report each container's CPU request (inspect for unexpectedly high values)
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.resources.requests.cpu != null) |
  {"container": .name, "cpu_request": .resources.requests.cpu}
' deployment.yaml

# Memory requests vs limits mismatch
# (containers with neither set compare null != null, so are not reported)
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.resources.requests.memory != .resources.limits.memory) |
  {
    "container": .name,
    "mem_request": .resources.requests.memory,
    "mem_limit": .resources.limits.memory
  }
' deployment.yaml

Image Policy Compliance

# Find images without explicit tags (using :latest)
# Caveat: a registry port (reg:5000/img) or an @sha256 digest also
# contains ':' — refine the regex if your images use those forms.
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.image | test(":latest$") or (test(":") | not)) |
  {"container": .name, "image": .image, "issue": "missing or latest tag"}
' deployment.yaml

# Find images from non-approved registries
# (bare images like "nginx" are implicitly docker.io but will be flagged)
yq e '
  select(.kind == "Deployment") |
  .spec.template.spec.containers[] |
  select(.image | test("^(gcr\\.io|docker\\.io|quay\\.io)") | not) |
  {"container": .name, "image": .image, "issue": "non-approved registry"}
' deployment.yaml

# List all unique images across manifests
# (.spec.template.spec // .spec) covers both Deployments and bare Pods —
# the template path alone would return nothing for kind: Pod
find manifests/ -name "*.yaml" -exec yq e '
  select(.kind == "Deployment" or .kind == "Pod") |
  (.spec.template.spec // .spec).containers[].image
' {} \; | sort -u

Label and Annotation Compliance

# Find deployments missing required labels
yq e '
  select(.kind == "Deployment") |
  select(.metadata.labels.app == null or .metadata.labels.team == null) |
  {"name": .metadata.name, "issue": "missing required labels"}
' deployment.yaml

# Validate label values match pattern
# // "" guards deployments without an environment label at all
# (test on null errors out), and correctly reports them as violations
yq e '
  select(.kind == "Deployment") |
  select((.metadata.labels.environment // "") | test("^(dev|staging|prod)$") | not) |
  {"name": .metadata.name, "env_label": .metadata.labels.environment}
' deployment.yaml

# Check for deprecated annotations (use spec.ingressClassName instead)
yq e '
  select(.metadata.annotations["kubernetes.io/ingress.class"] != null) |
  {"name": .metadata.name, "issue": "deprecated ingress.class annotation"}
' ingress.yaml

Bulk Manifest Updates

# Add label to all deployments
# Assign through a parenthesized (select | path) expression: with -i, a
# bare leading `select(...) | ... = ...` would DELETE every
# non-Deployment document from a multi-doc file. The parenthesized form
# leaves non-matching documents untouched.
yq e -i '
  (select(.kind == "Deployment") | .metadata.labels.managed-by) = "argocd"
' deployment.yaml

# Update image tag across all manifests
# Same parenthesized-path form so other documents survive the in-place edit
find manifests/ -name "*.yaml" -exec yq e -i '
  (select(.kind == "Deployment") | .spec.template.spec.containers[].image) |= sub(":v1\\.0\\.0$", ":v1.1.0")
' {} \;

# Add resource limits to containers missing them
# The whole select/iterate chain goes inside ( ... ) = so -i writes back
# the full document — piping to containers[] first and assigning would
# replace the file's content with bare container fragments.
yq e -i '
  (select(.kind == "Deployment") |
   .spec.template.spec.containers[] |
   select(.resources.limits == null) |
   .resources.limits) = {"memory": "256Mi", "cpu": "200m"}
' deployment.yaml

Multi-Document YAML Processing

# Process all documents in a multi-doc YAML
# -N (--no-doc) strips the --- separators from the output stream
yq e -N '
  select(.kind == "Service") |
  {"name": .metadata.name, "type": .spec.type, "ports": .spec.ports}
' multi-resource.yaml

# Extract specific resource from multi-doc
yq e 'select(.kind == "ConfigMap" and .metadata.name == "app-config")' all-resources.yaml

# Count resources by kind (one kind per line, tallied by uniq -c)
yq e -N '.kind' all-resources.yaml | sort | uniq -c

Kustomize Output Validation

# Validate kustomize build output
# NOTE(review): containers[0] assumes the first container is the main
# one — adjust for multi-container pods
kustomize build overlays/prod | yq e '
  select(.kind == "Deployment") |
  {
    "name": .metadata.name,
    "replicas": .spec.replicas,
    "image": .spec.template.spec.containers[0].image
  }
'

# Check for namespace conflicts (tally resources per namespace)
kustomize build . | yq e -N '.metadata.namespace' | sort | uniq -c

# Verify all resources have namespace
# (cluster-scoped kinds listed here are exempt; extend the exclusion
# list for other cluster-scoped kinds you use, e.g. CRDs)
kustomize build . | yq e '
  select(.metadata.namespace == null and .kind != "Namespace" and .kind != "ClusterRole" and .kind != "ClusterRoleBinding") |
  {"kind": .kind, "name": .metadata.name, "issue": "missing namespace"}
'