Sunday, 12 October 2025

Fix Broken Redis Deployments in Kubernetes: Real-World Troubleshooting Guide

Check out our course here: Udemy course


Ques:- 

DevOps team deployed a redis app on Kubernetes cluster, which was working fine so far. This morning one of the team members was making some changes in this existing setup, but he made some mistakes and the app went down. We need to fix this as soon as possible. Please take a look.

The deployment name is redis-deployment. The pods are not in running state right now, so please look into the issue and fix the same.


Ans:-

raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

redis-deployment   0/1     1            0           31s


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS              RESTARTS   AGE

redis-deployment-54cdf4f76d-x5wxs   0/1     ContainerCreating   0          39s


raj@jumphost ~$ kubectl describe pod redis-deployment-54cdf4f76d-x5wxs

Name:             redis-deployment-54cdf4f76d-x5wxs

Namespace:        default

Priority:         0

Service Account:  default

Node:             kodekloud-control-plane/172.17.0.2

Start Time:       Sun, 12 Oct 2025 11:05:25 +0000

Labels:           app=redis

                  pod-template-hash=54cdf4f76d

Annotations:      <none>

Status:           Pending

IP:               

IPs:              <none>

Controlled By:    ReplicaSet/redis-deployment-54cdf4f76d

Containers:

  redis-container:

    Container ID:   

    Image:          redis:alpin

    Image ID:       

    Port:           6379/TCP

    Host Port:      0/TCP

    State:          Waiting

      Reason:       ContainerCreating

    Ready:          False

    Restart Count:  0

    Requests:

      cpu:        300m

    Environment:  <none>

    Mounts:

      /redis-master from config (rw)

      /redis-master-data from data (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9t8tn (ro)

Conditions:

  Type              Status

  Initialized       True 

  Ready             False 

  ContainersReady   False 

  PodScheduled      True 

Volumes:

  data:

    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)

    Medium:     

    SizeLimit:  <unset>

  config:

    Type:      ConfigMap (a volume populated by a ConfigMap)

    Name:      redis-conig

    Optional:  false

  kube-api-access-9t8tn:

    Type:                    Projected (a volume that contains injected data from multiple sources)

    TokenExpirationSeconds:  3607

    ConfigMapName:           kube-root-ca.crt

    ConfigMapOptional:       <nil>

    DownwardAPI:             true

QoS Class:                   Burstable

Node-Selectors:              <none>

Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s

                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s

Events:

  Type     Reason       Age                From               Message

  ----     ------       ----               ----               -------

  Normal   Scheduled    57s                default-scheduler  Successfully assigned default/redis-deployment-54cdf4f76d-x5wxs to kodekloud-control-plane

  Warning  FailedMount  25s (x7 over 56s)  kubelet            MountVolume.SetUp failed for volume "config" : configmap "redis-conig" not found


raj@jumphost ~$ kubectl get cm

NAME               DATA   AGE

kube-root-ca.crt   1      5m46s

redis-config       2      65s


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

redis-deployment   0/1     1            0           97s


raj@jumphost ~$ kubectl edit deploy redis-deployment

deployment.apps/redis-deployment edited


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

redis-deployment   0/1     1            0           3m37s


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS              RESTARTS   AGE

redis-deployment-54cdf4f76d-x5wxs   0/1     ContainerCreating   0          4m12s

redis-deployment-5bcd4c7d64-94d6m   0/1     ErrImagePull        0          42s


raj@jumphost ~$ kubectl describe pod redis-deployment-5bcd4c7d64-94d6m

Name:             redis-deployment-5bcd4c7d64-94d6m

Namespace:        default

Priority:         0

Service Account:  default

Node:             kodekloud-control-plane/172.17.0.2

Start Time:       Sun, 12 Oct 2025 11:08:55 +0000

Labels:           app=redis

                  pod-template-hash=5bcd4c7d64

Annotations:      <none>

Status:           Pending

IP:               10.244.0.5

IPs:

  IP:           10.244.0.5

Controlled By:  ReplicaSet/redis-deployment-5bcd4c7d64

Containers:

  redis-container:

    Container ID:   

    Image:          redis:alpin

    Image ID:       

    Port:           6379/TCP

    Host Port:      0/TCP

    State:          Waiting

      Reason:       ImagePullBackOff

    Ready:          False

    Restart Count:  0

    Requests:

      cpu:        300m

    Environment:  <none>

    Mounts:

      /redis-master from config (rw)

      /redis-master-data from data (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-jl9s7 (ro)

Conditions:

  Type              Status

  Initialized       True 

  Ready             False 

  ContainersReady   False 

  PodScheduled      True 

Volumes:

  data:

    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)

    Medium:     

    SizeLimit:  <unset>

  config:

    Type:      ConfigMap (a volume populated by a ConfigMap)

    Name:      redis-config

    Optional:  false

  kube-api-access-jl9s7:

    Type:                    Projected (a volume that contains injected data from multiple sources)

    TokenExpirationSeconds:  3607

    ConfigMapName:           kube-root-ca.crt

    ConfigMapOptional:       <nil>

    DownwardAPI:             true

QoS Class:                   Burstable

Node-Selectors:              <none>

Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s

                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s

Events:

  Type     Reason     Age                From               Message

  ----     ------     ----               ----               -------

  Normal   Scheduled  71s                default-scheduler  Successfully assigned default/redis-deployment-5bcd4c7d64-94d6m to kodekloud-control-plane

  Normal   Pulling    27s (x3 over 70s)  kubelet            Pulling image "redis:alpin"

  Warning  Failed     27s (x3 over 70s)  kubelet            Failed to pull image "redis:alpin": rpc error: code = NotFound desc = failed to pull and unpack image "docker.io/library/redis:alpin": failed to resolve reference "docker.io/library/redis:alpin": docker.io/library/redis:alpin: not found

  Warning  Failed     27s (x3 over 70s)  kubelet            Error: ErrImagePull

  Normal   BackOff    3s (x4 over 69s)   kubelet            Back-off pulling image "redis:alpin"

  Warning  Failed     3s (x4 over 69s)   kubelet            Error: ImagePullBackOff


raj@jumphost ~$ kubectl edit deploy redis-deployment

deployment.apps/redis-deployment edited


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

redis-deployment   1/1     1            1           6m3s


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS        RESTARTS   AGE

redis-deployment-54cdf4f76d-x5wxs   0/1     Terminating   0          6m10s

redis-deployment-7c8d4f6ddf-j5ngk   1/1     Running       0          15s

raj@jumphost ~$ 


Conclusion:-

In this practical DevOps tutorial, learn how to troubleshoot and fix a broken Redis deployment on a Kubernetes cluster. Follow a real-world scenario where a misconfiguration caused the Redis app to go down, and see how to identify and resolve issues like:

  • Debugging pods stuck in ContainerCreating or ImagePullBackOff
  • Analyzing kubectl describe and kubectl logs output
  • Fixing ConfigMap reference errors
  • Correcting image name typos
  • Ensuring successful pod recovery and service availability

This course is ideal for DevOps engineers, SREs, and Kubernetes practitioners who want to sharpen their troubleshooting skills in real-world environments.



Deploy a Node.js App on Kubernetes with NodePort Service

Check out our course here: Udemy course


Ques:- 

Development team has completed development of one of the node applications, which they are planning to deploy on a Kubernetes cluster. They recently had a meeting with the DevOps team to share their requirements. Based on that, the DevOps team has listed out the exact requirements to deploy the app. Find below more details:

Create a deployment using gcr.io/kodekloud/centos-ssh-enabled:node image, replica count must be 2.

Create a service to expose this app, the service type must be NodePort, targetPort must be 8080 and nodePort should be 30012.

Make sure all the pods are in Running state after the deployment.

You can use any labels as per your choice.


Ans:-

Here’s the complete Kubernetes YAML configuration to deploy the Node.js application using the specified image and expose it via a NodePort service:

raj@jumphost ~$ cat pod.yaml 

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: node-app-deployment

  labels:

    app: node-app

spec:

  replicas: 2

  selector:

    matchLabels:

      app: node-app

  template:

    metadata:

      labels:

        app: node-app

    spec:

      containers:

        - name: node-container

          image: gcr.io/kodekloud/centos-ssh-enabled:node

          ports:

            - containerPort: 8080


---

apiVersion: v1

kind: Service

metadata:

  name: node-app-service

spec:

  type: NodePort

  selector:

    app: node-app

  ports:

    - port: 8080

      targetPort: 8080

      nodePort: 30012


raj@jumphost ~$ kubectl apply -f pod.yaml 

deployment.apps/node-app-deployment created

service/node-app-service created


raj@jumphost ~$ kubectl get all

NAME                                       READY   STATUS    RESTARTS   AGE

pod/node-app-deployment-57cd6cbd85-89svv   1/1     Running   0          52s

pod/node-app-deployment-57cd6cbd85-szln6   1/1     Running   0          52s


NAME                       TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE

service/kubernetes         ClusterIP   10.96.0.1       <none>        443/TCP          20m

service/node-app-service   NodePort    10.96.150.200   <none>        8080:30012/TCP   52s


NAME                                  READY   UP-TO-DATE   AVAILABLE   AGE

deployment.apps/node-app-deployment   2/2     2            2           52s


NAME                                             DESIRED   CURRENT   READY   AGE

replicaset.apps/node-app-deployment-57cd6cbd85   2         2         2       52s

raj@jumphost ~$ 


Ensure both pods are in Running state and the service is exposing port 30012.

Access the app in your browser at:

http://<NodeIP>:30012

Replace <NodeIP> with your Kubernetes node’s external IP.


Conclusion:-

In this hands-on tutorial, you’ll learn how to deploy a Node.js application on a Kubernetes cluster using a real-world DevOps workflow. We’ll walk through creating a deployment with multiple replicas and exposing the app using a NodePort service.

What You’ll Learn:

  • Deploying a Node.js app using a custom Docker image
  • Creating a Kubernetes Deployment with replica sets
  • Exposing the app using a NodePort service
  • Verifying pod and service status
  • Accessing the app from your browser

Perfect for DevOps engineers, developers, and SREs looking to gain practical Kubernetes experience.

Deploy Java-Based Tomcat Applications on Kubernetes: Step-by-Step Guide

Check out our course here: Udemy course


Ques:-

A new Java-based application is ready to be deployed on a Kubernetes cluster. The development team had a meeting with the DevOps team to share the requirements and application scope. The team is ready to set up an application stack for it under their existing cluster. Below you can find the details for this:

Create a namespace named tomcat-namespace-datacenter.

Create a deployment for tomcat app which should be named as tomcat-deployment-datacenter under the same namespace you created. Replica count should be 1, the container should be named as tomcat-container-datacenter, its image should be gcr.io/kodekloud/centos-ssh-enabled:tomcat and its container port should be 8080.

Create a service for tomcat app which should be named as tomcat-service-datacenter under the same namespace you created. Service type should be NodePort and nodePort should be 32227.

You can use any labels as per your choice.


Ans:-

Here’s the complete Kubernetes YAML configuration to deploy your Java-based Tomcat application as per the provided requirements:


raj@jumphost ~$ cat pod.yaml 

---

apiVersion: v1

kind: Namespace

metadata:

  name: tomcat-namespace-datacenter


---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: tomcat-deployment-datacenter

  namespace: tomcat-namespace-datacenter

  labels:

    app: tomcat

spec:

  replicas: 1

  selector:

    matchLabels:

      app: tomcat

  template:

    metadata:

      labels:

        app: tomcat

    spec:

      containers:

        - name: tomcat-container-datacenter

          image: gcr.io/kodekloud/centos-ssh-enabled:tomcat

          ports:

            - containerPort: 8080


---

apiVersion: v1

kind: Service

metadata:

  name: tomcat-service-datacenter

  namespace: tomcat-namespace-datacenter

spec:

  type: NodePort

  selector:

    app: tomcat

  ports:

    - port: 8080

      targetPort: 8080

      nodePort: 32227



raj@jumphost ~$ kubectl apply -f pod.yaml 

namespace/tomcat-namespace-datacenter created

deployment.apps/tomcat-deployment-datacenter created

service/tomcat-service-datacenter created


raj@jumphost ~$ kubectl get all -n tomcat-namespace-datacenter

NAME                                                READY   STATUS    RESTARTS   AGE

pod/tomcat-deployment-datacenter-6c696b4c7f-9qhdr   1/1     Running   0          36s


NAME                                TYPE       CLUSTER-IP    EXTERNAL-IP   PORT(S)          AGE

service/tomcat-service-datacenter   NodePort   10.96.33.73   <none>        8080:32227/TCP   36s


NAME                                           READY   UP-TO-DATE   AVAILABLE   AGE

deployment.apps/tomcat-deployment-datacenter   1/1     1            1           36s


NAME                                                      DESIRED   CURRENT   READY   AGE

replicaset.apps/tomcat-deployment-datacenter-6c696b4c7f   1         1         1       36s

raj@jumphost ~$ 


Then access the Tomcat app in your browser at:

http://<NodeIP>:32227

Replace <NodeIP> with your Kubernetes node's external IP.


Learn how to deploy a Java-based Tomcat application on a Kubernetes cluster using real-world DevOps practices. In this hands-on tutorial, you'll walk through the complete process of setting up a dedicated namespace, deploying a Tomcat container, and exposing it via a NodePort service.

What You’ll Learn:

  • Creating and managing Kubernetes namespaces
  • Deploying a Tomcat application using a custom Docker image
  • Configuring Kubernetes Deployments and Services
  • Exposing applications using NodePort
  • Accessing the Tomcat web interface from your browser

This course is ideal for DevOps engineers, system administrators, and developers looking to gain practical experience in deploying Java applications on Kubernetes.

Saturday, 11 October 2025

Deploy Grafana on Kubernetes: Monitor Your Apps with Ease

Check out our course here: Udemy course


Ques:-      

The DevOps team is planning to set up a Grafana tool to collect and analyze analytics from some applications. They are planning to deploy it on a Kubernetes cluster. Below you can find more details.

1.) Create a deployment named grafana-deployment-devops using any grafana image for Grafana app. Set other parameters as per your choice.

2.) Create NodePort type service with nodePort 32000 to expose the app.

You need not make any configuration changes inside the Grafana app once it is deployed; just make sure you are able to access the Grafana login page.

Ans:-

Here’s how you can deploy Grafana on a Kubernetes cluster using a Deployment and a NodePort Service, as per your requirements:

raj@jumphost ~$ cat pod.yaml 

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: grafana-deployment-devops

  labels:

    app: grafana

spec:

  replicas: 1

  selector:

    matchLabels:

      app: grafana

  template:

    metadata:

      labels:

        app: grafana

    spec:

      containers:

        - name: grafana

          image: grafana/grafana:latest

          ports:

            - containerPort: 3000


---

apiVersion: v1

kind: Service

metadata:

  name: grafana-service

spec:

  type: NodePort

  selector:

    app: grafana

  ports:

    - port: 3000

      targetPort: 3000

      nodePort: 32000


raj@jumphost ~$ kubectl apply -f pod.yaml 

deployment.apps/grafana-deployment-devops created

service/grafana-service created


raj@jumphost ~$ kubectl get all

NAME                                            READY   STATUS              RESTARTS   AGE

pod/grafana-deployment-devops-77648df4c-nmc8j   0/1     ContainerCreating   0          12s


NAME                      TYPE        CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE

service/grafana-service   NodePort    10.96.249.159   <none>        3000:32000/TCP   13s

service/kubernetes        ClusterIP   10.96.0.1       <none>        443/TCP          20m


NAME                                        READY   UP-TO-DATE   AVAILABLE   AGE

deployment.apps/grafana-deployment-devops   0/1     1            0           13s


NAME                                                  DESIRED   CURRENT   READY   AGE

replicaset.apps/grafana-deployment-devops-77648df4c   1         1         0       13s

raj@jumphost ~$ 


You should see grafana-service with NodePort 32000.

Open your browser and go to:

http://<NodeIP>:32000

Replace <NodeIP> with your Kubernetes node’s external IP.

Deploy Jenkins CI Server on Kubernetes: Step-by-Step Guide for DevOps Engineers

Check out our course here: Udemy course


Ques:-     

DevOps team is planning to set up a Jenkins CI server to create/manage some deployment pipelines for some of the projects. They want to set up the Jenkins server on Kubernetes cluster. Below you can find more details about the task:

1) Create a namespace jenkins

2) Create a Service for jenkins deployment. Service name should be jenkins-service under jenkins namespace, type should be NodePort, nodePort should be 30008

3) Create a Jenkins Deployment under the jenkins namespace. It should be named jenkins-deployment, its app label should be jenkins, the container name should be jenkins-container, it should use the jenkins/jenkins image, the containerPort should be 8080, and the replica count should be 1.

Make sure to wait for the pods to be in running state and make sure you are able to access the Jenkins login screen in the browser.

Ans:-

Here’s how you can complete the Jenkins CI server setup on a Kubernetes cluster as per your requirements:

raj@jumphost ~$ cat pod.yaml 

---

apiVersion: v1

kind: Namespace

metadata:

  name: jenkins


---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: jenkins-deployment

  namespace: jenkins

  labels:

    app: jenkins

spec:

  replicas: 1

  selector:

    matchLabels:

      app: jenkins

  template:

    metadata:

      labels:

        app: jenkins

    spec:

      containers:

        - name: jenkins-container

          image: jenkins/jenkins

          ports:

            - containerPort: 8080


---

apiVersion: v1

kind: Service

metadata:

  name: jenkins-service

  namespace: jenkins

spec:

  type: NodePort

  selector:

    app: jenkins

  ports:

    - port: 8080

      targetPort: 8080

      nodePort: 30008


raj@jumphost ~$ kubectl apply -f pod.yaml 

namespace/jenkins created

deployment.apps/jenkins-deployment created

service/jenkins-service created


raj@jumphost ~$ kubectl get all -n jenkins

NAME                                      READY   STATUS    RESTARTS   AGE

pod/jenkins-deployment-667887d68c-wcz9b   1/1     Running   0          36s


NAME                      TYPE       CLUSTER-IP      EXTERNAL-IP   PORT(S)          AGE

service/jenkins-service   NodePort   10.96.192.186   <none>        8080:30008/TCP   36s


NAME                                 READY   UP-TO-DATE   AVAILABLE   AGE

deployment.apps/jenkins-deployment   1/1     1            1           36s


NAME                                            DESIRED   CURRENT   READY   AGE

replicaset.apps/jenkins-deployment-667887d68c   1         1         1       36s

raj@jumphost ~$ 


Access Jenkins UI: Open your browser and go to:

http://<NodeIP>:30008

Replace <NodeIP> with the IP of your Kubernetes node.

Friday, 10 October 2025

Kubernetes Pod with Environment Variables and Custom Commands – Real-World Example

Check out our course here: Udemy course

Ques:-    

The DevOps team is working on setting up some prerequisites for an application that will send greetings to different users. There is a sample deployment that needs to be tested. Below is a scenario which needs to be configured on the Kubernetes cluster. Please find more details about it below.

Create a pod named print-envars-greeting.

Configure spec as, the container name should be print-env-container and use bash image.

Create three environment variables:

a. GREETING and its value should be Welcome to

b. COMPANY and its value should be DevOps

c. GROUP and its value should be Industries

Use command ["/bin/sh", "-c", 'echo "$(GREETING) $(COMPANY) $(GROUP)"'] (please use this exact command), also set its restartPolicy policy to Never to avoid crash loop back.

You can check the output using kubectl logs -f print-envars-greeting command.


Ans:-


Here's the YAML manifest to create the print-envars-greeting pod as per your requirements:

raj@jumphost ~$ cat pod.yaml

apiVersion: v1

kind: Pod

metadata:

  name: print-envars-greeting

spec:

  containers:

  - name: print-env-container

    image: bash

    env:

    - name: GREETING

      value: "Welcome to"

    - name: COMPANY

      value: "DevOps"

    - name: GROUP

      value: "Industries"

    command: ["/bin/sh", "-c", 'echo "$(GREETING) $(COMPANY) $(GROUP)"']

  restartPolicy: Never


raj@jumphost ~$ kubectl apply -f pod.yaml  

pod/print-envars-greeting created


raj@jumphost ~$ kubectl get pods

NAME                    READY   STATUS      RESTARTS   AGE

print-envars-greeting   0/1     Completed   0          15s


raj@jumphost ~$ kubectl logs print-envars-greeting

Welcome to DevOps Industries

raj@jumphost ~$ 


You can convert this into a Job or CronJob for repeated execution!


Deploy a Highly Available Static Website on Kubernetes using NGINX

Check out our course here: Udemy course

Ques:-    

Developers are developing a static website and they want to deploy it on Kubernetes cluster. They want it to be highly available and scalable. Therefore, based on the requirements, the DevOps team has decided to create a deployment for it with multiple replicas. Below you can find more details about it:

Create a deployment using nginx image with latest tag only and remember to mention the tag i.e nginx:latest. Name it as nginx-deployment. The container should be named as nginx-container, also make sure replica counts are 3.

Create a NodePort type service named nginx-service. The nodePort should be 30011.


Ans:-

Here’s how you can create the Deployment and NodePort Service for your static website using the nginx:latest image in a Kubernetes cluster.

raj@jumphost ~$ cat pod.yaml 

---

apiVersion: apps/v1

kind: Deployment

metadata:

  name: nginx-deployment

spec:

  replicas: 3

  selector:

    matchLabels:

      app: nginx

  template:

    metadata:

      labels:

        app: nginx

    spec:

      containers:

      - name: nginx-container

        image: nginx:latest

        ports:

        - containerPort: 80


---

apiVersion: v1

kind: Service

metadata:

  name: nginx-service

spec:

  type: NodePort

  selector:

    app: nginx

  ports:

  - port: 80

    targetPort: 80

    nodePort: 30011


raj@jumphost ~$ kubectl apply -f pod.yaml 

deployment.apps/nginx-deployment created

service/nginx-service created


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

nginx-deployment   3/3     3            3           19s


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS    RESTARTS   AGE

nginx-deployment-5b58668cfc-4tq5f   1/1     Running   0          26s

nginx-deployment-5b58668cfc-sls7t   1/1     Running   0          26s

nginx-deployment-5b58668cfc-w5pt6   1/1     Running   0          26s


raj@jumphost ~$ kubectl get svc

NAME            TYPE        CLUSTER-IP    EXTERNAL-IP   PORT(S)        AGE

nginx-service   NodePort    10.96.37.48   <none>        80:30011/TCP   36s


Once deployed, your static website will be accessible via any node's IP on port 30011.


Kubernetes Sidecar Pattern: Ship Nginx Logs with Ubuntu in a Shared Volume

Check out our course here: Udemy course

Ques:-    


We have a web server container running the nginx image. The access and error logs generated by the web server are not critical enough to be placed on a persistent volume. However, Nautilus developers need access to the last 24 hours of logs so that they can trace issues and bugs. Therefore, we need to ship the access and error logs for the web server to a log-aggregation service. Following the separation of concerns principle, we implement the Sidecar pattern by deploying a second container that ships the error and access logs from nginx. Nginx does one thing, and it does it well—serving web pages. The second container also specializes in its task—shipping logs. Since containers are running on the same Pod, we can use a shared emptyDir volume to read and write logs.

Create a pod named webserver.

Create an emptyDir volume shared-logs.

Create two containers from nginx and ubuntu images with latest tag only and remember to mention tag i.e nginx:latest, nginx container name should be nginx-container and ubuntu container name should be sidecar-container on webserver pod.

Add command on sidecar-container "sh","-c","while true; do cat /var/log/nginx/access.log /var/log/nginx/error.log; sleep 30; done"

Mount the volume shared-logs on both containers at location /var/log/nginx, all containers should be up and running.


Ans:-

Here’s the full YAML manifest to create the webserver Pod with the Sidecar pattern using a shared emptyDir volume for logs:

raj@jumphost ~$ cat pod.yaml 

apiVersion: v1

kind: Pod

metadata:

  name: webserver

spec:

  volumes:

    - name: shared-logs

      emptyDir: {}

  containers:

    - name: nginx-container

      image: nginx:latest

      volumeMounts:

        - name: shared-logs

          mountPath: /var/log/nginx

    - name: sidecar-container

      image: ubuntu:latest

      command: ["sh", "-c", "while true; do cat /var/log/nginx/access.log /var/log/nginx/error.log; sleep 30; done"]

      volumeMounts:

        - name: shared-logs

          mountPath: /var/log/nginx


raj@jumphost ~$ kubectl apply -f pod.yaml


raj@jumphost ~$ kubectl get pod

NAME        READY   STATUS    RESTARTS   AGE

webserver   2/2     Running   0          77s


raj@jumphost ~$ kubectl logs webserver -c sidecar-container


2025/10/10 11:24:23 [notice] 1#1: using the "epoll" event method

2025/10/10 11:24:23 [notice] 1#1: nginx/1.29.2


Explanation:

  • Pod name: webserver
  • Volume: shared-logs of type emptyDir (non-persistent, shared between containers)
  • Containers:
    • nginx-container: runs nginx:latest, mounts /var/log/nginx
    • sidecar-container: runs ubuntu:latest, reads logs every 30 seconds using a shell loop

Thursday, 9 October 2025

Kubernetes Shared Volumes: Multi-Container Pod with emptyDir Volume

Check out our course here: Udemy course

Ques:-   


We are working on an application that will be deployed on multiple containers within a pod on a Kubernetes cluster. There is a requirement to share a volume among the containers to save some temporary data. The DevOps team is developing a similar template to replicate the scenario. Below you can find more details about it.

Create a pod named volume-share-xfusion.

For the first container, use image ubuntu with the latest tag only and remember to mention the tag i.e ubuntu:latest, the container should be named volume-container-xfusion-1, and run a sleep command for it so that it remains in running state. Volume volume-share should be mounted at path /tmp/beta.

For the second container, use image ubuntu with the latest tag only and remember to mention the tag i.e ubuntu:latest, the container should be named volume-container-xfusion-2, and again run a sleep command for it so that it remains in running state. Volume volume-share should be mounted at path /tmp/games.

Volume name should be volume-share of type emptyDir.

After creating the pod, exec into the first container i.e volume-container-xfusion-1, and just for testing create a file beta.txt with any content under the mounted path of the first container i.e /tmp/beta. The file beta.txt should be present under the mounted path /tmp/games on the second container volume-container-xfusion-2 as well, since they are using a shared volume.
Ans:-
Here's the complete Kubernetes manifest to create the pod volume-share-xfusion with two containers sharing an emptyDir volume named volume-share:
raj@jumphost ~$ cat pod.yaml

apiVersion: v1
kind: Pod
metadata:
  name: volume-share-xfusion
spec:
  volumes:
    - name: volume-share
      emptyDir: {}
  containers:
    - name: volume-container-xfusion-1
      image: ubuntu:latest
      command: ["sleep", "3600"]
      volumeMounts:
        - name: volume-share
          mountPath: /tmp/beta
    - name: volume-container-xfusion-2
      image: ubuntu:latest
      command: ["sleep", "3600"]
      volumeMounts:
        - name: volume-share
          mountPath: /tmp/games

Create the pod:
raj@jumphost ~$ kubectl apply -f pod.yaml
pod/volume-share-xfusion created

Verify pod status:

raj@jumphost ~$ kubectl get pods volume-share-xfusion
NAME                   READY   STATUS    RESTARTS   AGE
volume-share-xfusion   2/2     Running   0          15s

Exec into the first container and create the file:
raj@jumphost ~$ kubectl exec -it volume-share-xfusion -c volume-container-xfusion-1 -- bash
root@volume-share-xfusion:/# echo "Shared volume test" > /tmp/beta/beta.txt
root@volume-share-xfusion:/# cat /tmp/beta/beta.txt
Shared volume test
root@volume-share-xfusion:/# exit

Exec into the second container and verify the file:

raj@jumphost ~$ kubectl exec -it volume-share-xfusion -c volume-container-xfusion-2 -- bash
root@volume-share-xfusion:/# cat /tmp/games/beta.txt
Shared volume test
root@volume-share-xfusion:/#

You should see the content Shared volume test, confirming that the volume is shared correctly between the containers.

Tuesday, 7 October 2025

Troubleshooting Nginx & PHP-FPM on Kubernetes: Real-World Debugging & Fix

Check out our course here: Udemy course

Ques:-   

We encountered an issue with our Nginx and PHP-FPM setup on the Kubernetes cluster this morning, which halted its functionality. Investigate and rectify the issue:

The pod name is nginx-phpfpm and configmap name is nginx-config. Identify and fix the problem.

Once resolved, copy /home/thor/index.php file from the jump host to the nginx-container within the nginx document root.



Ans:-

Based on the investigation, the nginx-phpfpm pod stopped working due to a misconfigured volume mount path. The Nginx container was expecting to serve files from /var/www/html, but the actual volume was mounted at a different path, causing the application to fail.

Shared volume: emptyDir named shared-files is correctly mounted in both containers.

Mismatch in Shared Volume Paths

  • PHP-FPM writes to /usr/share/nginx/html
  • Nginx serves from /var/www/html

    Use the same mount path in both containers, e.g.:

    mountPath: /var/www/html


raj@jumphost ~$ kubectl get cm

NAME               DATA   AGE

kube-root-ca.crt   1      15m

nginx-config       1      9m55s


raj@jumphost ~$ kubectl get pod

NAME           READY   STATUS    RESTARTS   AGE

nginx-phpfpm   2/2     Running   0          10m


raj@jumphost ~$ kubectl get svc

NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)          AGE

kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP          16m

nginx-service   NodePort    10.96.119.83   <none>        8099:30008/TCP   10m


raj@jumphost ~$ kubectl describe cm nginx-config 

Name:         nginx-config

Namespace:    default

Labels:       <none>

Annotations:  <none>


Data

====

nginx.conf:

----

events {

}

http {

  server {

    listen 8099 default_server;

    listen [::]:8099 default_server;


    # Set nginx to serve files from the shared volume!

    root /var/www/html;

    index  index.html index.htm index.php;

    server_name _;

    location / {

      try_files $uri $uri/ =404;

    }

    location ~ \.php$ {

      include fastcgi_params;

      fastcgi_param REQUEST_METHOD $request_method;

      fastcgi_param SCRIPT_FILENAME $document_root$fastcgi_script_name;

      fastcgi_pass 127.0.0.1:9000;

    }

  }

}



BinaryData

====


Events:  <none>



raj@jumphost ~$ kubectl get pod nginx-phpfpm -oyaml

apiVersion: v1

kind: Pod

metadata:

  annotations:

    kubectl.kubernetes.io/last-applied-configuration: |

      {"apiVersion":"v1","kind":"Pod","metadata":{"annotations":{},"labels":{"app":"php-app"},"name":"nginx-phpfpm","namespace":"default"},"spec":{"containers":[{"image":"php:7.2-fpm-alpine","name":"php-fpm-container","volumeMounts":[{"mountPath":"/usr/share/nginx/html","name":"shared-files"}]},{"image":"nginx:latest","name":"nginx-container","volumeMounts":[{"mountPath":"/var/www/html","name":"shared-files"},{"mountPath":"/etc/nginx/nginx.conf","name":"nginx-config-volume","subPath":"nginx.conf"}]}],"volumes":[{"emptyDir":{},"name":"shared-files"},{"configMap":{"name":"nginx-config"},"name":"nginx-config-volume"}]}}

  creationTimestamp: "2025-10-08T02:07:06Z"

  labels:

    app: php-app

  name: nginx-phpfpm

  namespace: default

  resourceVersion: "941"

  uid: fe31425d-d2f5-4d56-8450-d2a0c05bb077

spec:

  containers:

  - image: php:7.2-fpm-alpine

    imagePullPolicy: IfNotPresent

    name: php-fpm-container

    resources: {}

    terminationMessagePath: /dev/termination-log

    terminationMessagePolicy: File

    volumeMounts:

    - mountPath: /usr/share/nginx/html

      name: shared-files

    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount

      name: kube-api-access-s5m82

      readOnly: true

  - image: nginx:latest

    imagePullPolicy: Always

    name: nginx-container

    resources: {}

    terminationMessagePath: /dev/termination-log

    terminationMessagePolicy: File

    volumeMounts:

    - mountPath: /var/www/html

      name: shared-files

    - mountPath: /etc/nginx/nginx.conf

      name: nginx-config-volume

      subPath: nginx.conf

    - mountPath: /var/run/secrets/kubernetes.io/serviceaccount

      name: kube-api-access-s5m82

      readOnly: true

  dnsPolicy: ClusterFirst

  enableServiceLinks: true

  nodeName: kodekloud-control-plane

  preemptionPolicy: PreemptLowerPriority

  priority: 0

  restartPolicy: Always

  schedulerName: default-scheduler

  securityContext: {}

  serviceAccount: default

  serviceAccountName: default

  terminationGracePeriodSeconds: 30

  tolerations:

  - effect: NoExecute

    key: node.kubernetes.io/not-ready

    operator: Exists

    tolerationSeconds: 300

  - effect: NoExecute

    key: node.kubernetes.io/unreachable

    operator: Exists

    tolerationSeconds: 300

  volumes:

  - emptyDir: {}

    name: shared-files

  - configMap:

      defaultMode: 420

      name: nginx-config

    name: nginx-config-volume

  - name: kube-api-access-s5m82

    projected:

      defaultMode: 420

      sources:

      - serviceAccountToken:

          expirationSeconds: 3607

          path: token

      - configMap:

          items:

          - key: ca.crt

            path: ca.crt

          name: kube-root-ca.crt

      - downwardAPI:

          items:

          - fieldRef:

              apiVersion: v1

              fieldPath: metadata.namespace

            path: namespace

status:

  conditions:

  - lastProbeTime: null

    lastTransitionTime: "2025-10-08T02:07:06Z"

    status: "True"

    type: Initialized

  - lastProbeTime: null

    lastTransitionTime: "2025-10-08T02:07:19Z"

    status: "True"

    type: Ready

  - lastProbeTime: null

    lastTransitionTime: "2025-10-08T02:07:19Z"

    status: "True"

    type: ContainersReady

  - lastProbeTime: null

    lastTransitionTime: "2025-10-08T02:07:06Z"

    status: "True"

    type: PodScheduled

  containerStatuses:

  - containerID: containerd://8c9917ce159003d767caeef3f0c165c702a8dccb3cfc14c0f55ee915318a73f6

    image: docker.io/library/nginx:latest

    imageID: docker.io/library/nginx@sha256:8adbdcb969e2676478ee2c7ad333956f0c8e0e4c5a7463f4611d7a2e7a7ff5dc

    lastState: {}

    name: nginx-container

    ready: true

    restartCount: 0

    started: true

    state:

      running:

        startedAt: "2025-10-08T02:07:18Z"

  - containerID: containerd://140e6aae1e89eeea22f8112bc02535514763d73abc166fab83691c03d33aa783

    image: docker.io/library/php:7.2-fpm-alpine

    imageID: docker.io/library/php@sha256:2e2d92415f3fc552e9a62548d1235f852c864fcdc94bcf2905805d92baefc87f

    lastState: {}

    name: php-fpm-container

    ready: true

    restartCount: 0

    started: true

    state:

      running:

        startedAt: "2025-10-08T02:07:10Z"

  hostIP: 172.17.0.2

  phase: Running

  podIP: 10.244.0.5

  podIPs:

  - ip: 10.244.0.5

  qosClass: BestEffort

  startTime: "2025-10-08T02:07:06Z"


raj@jumphost ~$ kubectl edit pod nginx-phpfpm

In the editor, change the php-fpm container's mountPath from /usr/share/nginx/html to /var/www/html so both containers share the same document root. Note: volume mount paths of a running pod are immutable, so the API server will reject the edit and kubectl will save your modified spec to a temporary file (e.g. /tmp/kubectl-edit-xxxx.yaml). Recreate the pod from that file:

raj@jumphost ~$ kubectl replace --force -f /tmp/kubectl-edit-xxxx.yaml

raj@jumphost ~$ kubectl cp /home/thor/index.php default/nginx-phpfpm:/var/www/html/index.php -c nginx-container

raj@jumphost ~$ curl http://172.17.0.2:30008


Deploy Highly Available Applications Using Kubernetes ReplicationController (nginx:latest)

Our course you can check :-   Udemy course 

Ques:-  

DevOps team is establishing a ReplicationController to deploy multiple pods for hosting applications that require a highly available infrastructure. Follow the specifications below to create the ReplicationController:

Create a ReplicationController using the nginx image with latest tag, and name it nginx-replicationcontroller.

Assign labels app as nginx_app, and type as front-end. Ensure the container is named nginx-container and set the replica count to 3.

All pods should be in a running state post-deployment.


Ans:-

🔍 What is a ReplicationController?

A ReplicationController (RC) is a Kubernetes object that:

  • Ensures a fixed number of pod replicas are running.
  • Automatically creates new pods if some fail or are deleted.
  • Uses a label selector to manage the pods it controls.

⚠️ Note: ReplicationController has largely been replaced by Deployments and ReplicaSets, which offer more advanced features like rolling updates and rollbacks.


Use Cases of ReplicationController

  • High Availability: Ensures multiple instances of an application are always running.
  • Fault Tolerance: Automatically replaces failed pods.
  • Load Distribution: Helps distribute traffic across multiple pod replicas.
  • Legacy Support: Still used in older Kubernetes setups or for backward compatibility.

🆚 ReplicationController vs ReplicaSet

Feature          | ReplicationController | ReplicaSet
-----------------|-----------------------|-------------------------
Label selector   | Exact match only      | Supports set-based
Rolling updates  | Not supported         | Supported via Deployment
Preferred usage  | Legacy                | Modern Kubernetes




raj@jumphost ~$ cat pod.yaml 
apiVersion: v1
kind: ReplicationController
metadata:
  # RC name exactly as required by the task specification
  name: nginx-replicationcontroller
  labels:
    app: nginx_app
    type: front-end
spec:
  # Keep exactly 3 pod replicas running at all times
  replicas: 3
  # Selector must match the pod template labels below,
  # otherwise the RC cannot manage the pods it creates
  selector:
    app: nginx_app
    type: front-end
  template:
    metadata:
      labels:
        app: nginx_app
        type: front-end
    spec:
      containers:
      - name: nginx-container
        image: nginx:latest
        ports:
        # Default nginx HTTP port
        - containerPort: 80

raj@jumphost ~$ kubectl apply -f pod.yaml 
replicationcontroller/nginx-replicationcontroller created

raj@jumphost ~$ kubectl get replicationcontroller
NAME                          DESIRED   CURRENT   READY   AGE
nginx-replicationcontroller   3         3         3       59s

raj@jumphost ~$ kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE
nginx-replicationcontroller-7mf6t   1/1     Running   0          68s
nginx-replicationcontroller-kbmqx   1/1     Running   0          68s
nginx-replicationcontroller-n82rj   1/1     Running   0          68s
raj@jumphost ~$ 

Update Kubernetes Deployment and Service Without Deletion in Prod (nginx-deployment)

Our course you can check :-   Udemy course 

Ques:- 

An application deployed on the Kubernetes cluster requires an update with new features developed by the application development team. The existing setup includes a deployment named nginx-deployment and a service named nginx-service. Below are the necessary changes to be implemented without deleting the deployment and service:

1.) Modify the service nodeport from 30008 to 32165

2.) Change the replicas count from 1 to 5

3.) Update the image from nginx:1.18 to nginx:latest


Ans:-

raj@jumphost ~$ kubectl get svc

NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE

kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP        8m54s

nginx-service   NodePort    10.96.188.16   <none>        80:30008/TCP   6m40s


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

nginx-deployment   1/1     1            1           6m54s


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS    RESTARTS   AGE

nginx-deployment-58cf54c7f6-sq9d8   1/1     Running   0          7m3s


raj@jumphost ~$ kubectl patch service nginx-service -p '{"spec": {"ports": [{"port": 80, "targetPort": 80, "protocol": "TCP", "nodePort": 32165}]}}'

service/nginx-service patched


raj@jumphost ~$ kubectl get svc
NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE

kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP        10m

nginx-service   NodePort    10.96.188.16   <none>        80:32165/TCP   8m26s


raj@jumphost ~$ kubectl scale deployment nginx-deployment --replicas=5

deployment.apps/nginx-deployment scaled


raj@jumphost ~$ kubectl get deploy
NAME               READY   UP-TO-DATE   AVAILABLE   AGE

nginx-deployment   5/5     5            5           9m14s


raj@jumphost ~$ kubectl get pod
NAME                                READY   STATUS    RESTARTS   AGE

nginx-deployment-58cf54c7f6-8jtw2   1/1     Running   0          23s

nginx-deployment-58cf54c7f6-dhbwl   1/1     Running   0          23s

nginx-deployment-58cf54c7f6-ph4d9   1/1     Running   0          23s

nginx-deployment-58cf54c7f6-sq9d8   1/1     Running   0          9m29s

nginx-deployment-58cf54c7f6-vjmr2   1/1     Running   0          23s


raj@jumphost ~$ kubectl get deployment nginx-deployment -o yaml | grep -i image:

      - image: nginx:1.18


raj@jumphost ~$ kubectl set image deployment/nginx-deployment nginx-container=nginx:latest

deployment.apps/nginx-deployment image updated


raj@jumphost ~$ kubectl get deployment nginx-deployment -o yaml | grep -i image:

      - image: nginx:latest


raj@jumphost ~$ kubectl get svc

NAME            TYPE        CLUSTER-IP     EXTERNAL-IP   PORT(S)        AGE

kubernetes      ClusterIP   10.96.0.1      <none>        443/TCP        14m

nginx-service   NodePort    10.96.188.16   <none>        80:32165/TCP   11m


raj@jumphost ~$ kubectl get deploy

NAME               READY   UP-TO-DATE   AVAILABLE   AGE

nginx-deployment   5/5     5            5           12m


raj@jumphost ~$ kubectl get pod

NAME                                READY   STATUS    RESTARTS   AGE

nginx-deployment-854ff588b7-dzhpp   1/1     Running   0          46s

nginx-deployment-854ff588b7-fkvrk   1/1     Running   0          56s

nginx-deployment-854ff588b7-qknjc   1/1     Running   0          56s

nginx-deployment-854ff588b7-sjlck   1/1     Running   0          45s

nginx-deployment-854ff588b7-t2hgr   1/1     Running   0          56s

raj@jumphost ~$ 

Monday, 6 October 2025

Debugging Pods & Sidecars in Production

Our course you can check :-   Udemy course 



Ques:-

A junior DevOps team member encountered difficulties deploying a stack on the Kubernetes cluster. The pod fails to start, presenting errors. Let's troubleshoot and rectify the issue promptly.

There is a pod named webserver, and the container within it is named httpd-container; it's utilizing the httpd:latest image.

Additionally, there's a sidecar container named sidecar-container using the ubuntu:latest image.

Identify and address the issue to ensure the pod is in the running state and the application is accessible.


Ans:-


raj@jumphost ~$ kubectl get pod

NAME        READY   STATUS             RESTARTS   AGE

webserver   1/2     ImagePullBackOff   0          6m36s


raj@jumphost ~$ kubectl logs webserver

Defaulted container "httpd-container" out of: httpd-container, sidecar-container

Error from server (BadRequest): container "httpd-container" in pod "webserver" is waiting to start: trying and failing to pull image


raj@jumphost ~$ kubectl describe pod webserver

Name:             webserver

Namespace:        default

Priority:         0

Service Account:  default

Node:             kodekloud-control-plane/172.17.0.2

Start Time:       Mon, 06 Oct 2025 09:52:57 +0000

Labels:           app=web-app

Annotations:      <none>

Status:           Pending

IP:               10.244.0.5

IPs:

  IP:  10.244.0.5

Containers:

  httpd-container:

    Container ID:   

    Image:          httpd:latests

    Image ID:       

    Port:           <none>

    Host Port:      <none>

    State:          Waiting

      Reason:       ImagePullBackOff

    Ready:          False

    Restart Count:  0

    Environment:    <none>

    Mounts:

      /var/log/httpd from shared-logs (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7v7nm (ro)

  sidecar-container:

    Container ID:  containerd://c559916254f3ae3d5397b2c41350cfb8471db98945f2a9061cc3757a41bc7354

    Image:         ubuntu:latest

    Image ID:      docker.io/library/ubuntu@sha256:728785b59223d755e3e5c5af178fab1be7031f3522c5ccd7a0b32b80d8248123

    Port:          <none>

    Host Port:     <none>

    Command:

      sh

      -c

      while true; do cat /var/log/httpd/access.log /var/log/httpd/error.log; sleep 30; done

    State:          Running

      Started:      Mon, 06 Oct 2025 09:53:02 +0000

    Ready:          True

    Restart Count:  0

    Environment:    <none>

    Mounts:

      /var/log/httpd from shared-logs (rw)

      /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-7v7nm (ro)

Conditions:

  Type              Status

  Initialized       True 

  Ready             False 

  ContainersReady   False 

  PodScheduled      True 

Volumes:

  shared-logs:

    Type:       EmptyDir (a temporary directory that shares a pod's lifetime)

    Medium:     

    SizeLimit:  <unset>

  kube-api-access-7v7nm:

    Type:                    Projected (a volume that contains injected data from multiple sources)

    TokenExpirationSeconds:  3607

    ConfigMapName:           kube-root-ca.crt

    ConfigMapOptional:       <nil>

    DownwardAPI:             true

QoS Class:                   BestEffort

Node-Selectors:              <none>

Tolerations:                 node.kubernetes.io/not-ready:NoExecute op=Exists for 300s

                             node.kubernetes.io/unreachable:NoExecute op=Exists for 300s

Events:

  Type     Reason     Age                     From               Message

  ----     ------     ----                    ----               -------

  Normal   Scheduled  7m20s                   default-scheduler  Successfully assigned default/webserver to kodekloud-control-plane

  Normal   Pulling    7m19s                   kubelet            Pulling image "ubuntu:latest"

  Normal   Pulled     7m15s                   kubelet            Successfully pulled image "ubuntu:latest" in 3.742060201s (3.742079128s including waiting)

  Normal   Created    7m15s                   kubelet            Created container sidecar-container

  Normal   Started    7m15s                   kubelet            Started container sidecar-container

  Normal   Pulling    6m35s (x3 over 7m19s)   kubelet            Pulling image "httpd:latests"

  Warning  Failed     6m35s (x3 over 7m19s)   kubelet            Failed to pull image "httpd:latests": rpc error: code = NotFound desc = failed to pull and unpack image "docker.io/library/httpd:latests": failed to resolve reference "docker.io/library/httpd:latests": docker.io/library/httpd:latests: not found

  Warning  Failed     6m35s (x3 over 7m19s)   kubelet            Error: ErrImagePull

  Warning  Failed     5m59s (x6 over 7m15s)   kubelet            Error: ImagePullBackOff

  Normal   BackOff    2m13s (x21 over 7m15s)  kubelet            Back-off pulling image "httpd:latests"


raj@jumphost ~$ kubectl edit pod webserver

pod/webserver edited


raj@jumphost ~$ kubectl get pod

NAME        READY   STATUS    RESTARTS   AGE

webserver   2/2     Running   0          10m

raj@jumphost ~$