Setup EFK Stack on Amazon EKS cluster

About EFK

EFK stands for Elasticsearch, Fluentd, and Kibana: Elasticsearch stores and indexes the logs, Fluentd runs as a log collector on every node and forwards container logs to Elasticsearch, and Kibana is the web UI for searching and visualizing them.

This guide assumes you already have an EKS cluster up, with all worker nodes in the Ready state:

kubectl get nodes
NAME                           STATUS   ROLES    AGE   VERSION
ip-10-0-129-220.ec2.internal   Ready    <none>   27m   v1.24.10-eks-48e63af
ip-10-0-149-55.ec2.internal    Ready    <none>   26m   v1.24.10-eks-48e63af
ip-10-0-190-100.ec2.internal   Ready    <none>   30m   v1.24.10-eks-48e63af
ip-10-0-226-108.ec2.internal   Ready    <none>   30m   v1.24.10-eks-48e63af

Setup EFK Stack

Elasticsearch as a StatefulSet

Elasticsearch is deployed as a three-node StatefulSet, with each pod claiming its own persistent volume through a volumeClaimTemplate. Save the following manifest as es-sts.yaml:

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: es-cluster
spec:
  serviceName: elasticsearch
  replicas: 3
  selector:
    matchLabels:
      app: elasticsearch
  template:
    metadata:
      labels:
        app: elasticsearch
    spec:
      containers:
      - name: elasticsearch
        image: docker.elastic.co/elasticsearch/elasticsearch:7.5.0
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        ports:
        - containerPort: 9200
          name: rest
          protocol: TCP
        - containerPort: 9300
          name: inter-node
          protocol: TCP
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
        env:
          - name: cluster.name
            value: k8s-logs
          - name: node.name
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: discovery.seed_hosts
            value: "es-cluster-0.elasticsearch,es-cluster-1.elasticsearch,es-cluster-2.elasticsearch"
          - name: cluster.initial_master_nodes
            value: "es-cluster-0,es-cluster-1,es-cluster-2"
          - name: ES_JAVA_OPTS
            value: "-Xms512m -Xmx512m"
      initContainers:
      - name: fix-permissions
        image: busybox
        command: ["sh", "-c", "chown -R 1000:1000 /usr/share/elasticsearch/data"]
        securityContext:
          privileged: true
        volumeMounts:
        - name: data
          mountPath: /usr/share/elasticsearch/data
      - name: increase-vm-max-map
        image: busybox
        command: ["sysctl", "-w", "vm.max_map_count=262144"]
        securityContext:
          privileged: true
      - name: increase-fd-ulimit
        image: busybox
        command: ["sh", "-c", "ulimit -n 65536"]
        securityContext:
          privileged: true
  volumeClaimTemplates:
  - metadata:
      name: data
      labels:
        app: elasticsearch
    spec:
      accessModes: [ "ReadWriteOnce" ]
      # storageClassName: ""
      resources:
        requests:
          storage: 3Gi

Apply the manifest:

kubectl create -f es-sts.yaml
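
Elasticsearch pods are created one at a time because StatefulSet rollouts are sequential; you can watch them come up with:

kubectl get pods -l app=elasticsearch -w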


Next, create a headless service for Elasticsearch so the StatefulSet pods get stable DNS names (es-cluster-0.elasticsearch, and so on). Save it as es-svc.yaml:

apiVersion: v1
kind: Service
metadata:
  name: elasticsearch
  labels:
    app: elasticsearch
spec:
  selector:
    app: elasticsearch
  clusterIP: None
  ports:
    - port: 9200
      name: rest
    - port: 9300
      name: inter-node

Apply the manifest:

kubectl create -f es-svc.yaml
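
Since the service is headless (clusterIP: None), it will show None in the CLUSTER-IP column, which you can confirm with:

kubectl get svc elasticsearch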


You can check the PVC status using,

kubectl get pvc
NAME                STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
data-es-cluster-0   Bound    pvc-fefd5503-72e9-48ed-8ebb-053c45fe372f   3Gi        RWO            gp2            24h
data-es-cluster-1   Bound    pvc-a3c272a1-7135-40dc-a188-87fdf1804550   3Gi        RWO            gp2            24h
data-es-cluster-2   Bound    pvc-837d2edb-159a-4de1-8d14-5b8fbdb67237   3Gi        RWO            gp2            24h

Wait until all the Elasticsearch pods come into the Running status:

kubectl get pods
NAME           READY   STATUS    RESTARTS   AGE
es-cluster-0   1/1     Running   0          20h
es-cluster-1   1/1     Running   0          20h
es-cluster-2   1/1     Running   0          20h
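
To reproduce the health check output below, port-forward to one of the pods in a separate terminal and query the cluster health API:

kubectl port-forward es-cluster-0 9200:9200
curl http://localhost:9200/_cluster/health?pretty

A healthy three-node cluster reports status green: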

{
"cluster_name" : "k8s-logs",
"status" : "green",
"timed_out" : false,
"number_of_nodes" : 3,
"number_of_data_nodes" : 3,
"active_primary_shards" : 0,
"active_shards" : 0,
"relocating_shards" : 0,
"initializing_shards" : 0,
"unassigned_shards" : 0,
"delayed_unassigned_shards" : 0,
"number_of_pending_tasks" : 0,
"number_of_in_flight_fetch" : 0,
"task_max_waiting_in_queue_millis" : 0,
"active_shards_percent_as_number" : 100.0
}

Kibana Deployment & Service

Kibana runs as a single-replica Deployment and talks to Elasticsearch through the headless service created above (ELASTICSEARCH_URL points at http://elasticsearch:9200). Save the Deployment manifest; this guide assumes the file name kibana-deployment.yaml:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: kibana
  labels:
    app: kibana
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kibana
  template:
    metadata:
      labels:
        app: kibana
    spec:
      containers:
      - name: kibana
        image: docker.elastic.co/kibana/kibana:7.5.0
        resources:
          limits:
            cpu: 1000m
          requests:
            cpu: 100m
        env:
          - name: ELASTICSEARCH_URL
            value: http://elasticsearch:9200
        ports:
        - containerPort: 5601
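
Apply the manifest (using the assumed file name from above):

kubectl create -f kibana-deployment.yaml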
Next is the Kibana service. Despite the kibana-np name, this manifest uses a LoadBalancer type, so Kibana becomes reachable from outside the cluster on port 8080. Save it as kibana-svc.yaml:

apiVersion: v1
kind: Service
metadata:
  name: kibana-np
spec:
  selector:
    app: kibana
  type: LoadBalancer
  ports:
    - port: 8080
      targetPort: 5601

Create the kibana-svc now:

kubectl create -f kibana-svc.yaml

Check if the kibana deployment and pod are running using,

kubectl get deployment
NAME     READY   UP-TO-DATE   AVAILABLE   AGE
kibana   1/1     1            1           24h

kubectl get pods
NAME                      READY   STATUS    RESTARTS   AGE
es-cluster-0              1/1     Running   0          24h
es-cluster-1              1/1     Running   0          24h
es-cluster-2              1/1     Running   0          24h
kibana-6db5f8d7c8-zxjtf   1/1     Running   0          3h30m
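
Kibana becomes reachable once AWS provisions the load balancer. Fetch the external endpoint from the EXTERNAL-IP column of the service and open http://<EXTERNAL-IP>:8080 in a browser:

kubectl get svc kibana-np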

Fluentd DaemonSet

Fluentd runs as a DaemonSet and needs RBAC permissions to read pod and namespace metadata, which it uses to enrich each log record. Start with the ClusterRole, fluentd-role.yaml:

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: fluentd
  labels:
    app: fluentd
rules:
- apiGroups:
  - ""
  resources:
  - pods
  - namespaces
  verbs:
  - get
  - list
  - watch

Apply the manifest:

kubectl create -f fluentd-role.yaml

Next is the service account, fluentd-sa.yaml:

apiVersion: v1
kind: ServiceAccount
metadata:
  name: fluentd
  labels:
    app: fluentd
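
Apply the manifest:

kubectl create -f fluentd-sa.yaml

Now bind the ClusterRole to the service account with a ClusterRoleBinding (saved here as fluentd-rb.yaml, an assumed file name):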

apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: fluentd
roleRef:
  kind: ClusterRole
  name: fluentd
  apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
  name: fluentd
  namespace: default
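
Apply the manifest:

kubectl create -f fluentd-rb.yaml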

Finally, deploy the Fluentd DaemonSet itself, so one Fluentd pod runs on every node, tailing container logs from the host paths and shipping them to the elasticsearch service (saved here as fluentd-ds.yaml, an assumed file name):

apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd
  labels:
    app: fluentd
spec:
  selector:
    matchLabels:
      app: fluentd
  template:
    metadata:
      labels:
        app: fluentd
    spec:
      serviceAccount: fluentd
      serviceAccountName: fluentd
      containers:
      - name: fluentd
        image: fluent/fluentd-kubernetes-daemonset:v1.4.2-debian-elasticsearch-1.1
        env:
          - name: FLUENT_ELASTICSEARCH_HOST
            value: "elasticsearch.default.svc.cluster.local"
          - name: FLUENT_ELASTICSEARCH_PORT
            value: "9200"
          - name: FLUENT_ELASTICSEARCH_SCHEME
            value: "http"
          - name: FLUENTD_SYSTEMD_CONF
            value: disable
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers
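
Apply the manifest:

kubectl create -f fluentd-ds.yaml

One Fluentd pod should now be scheduled on each of the four worker nodes:
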
kubectl get pods
NAME                      READY   STATUS    RESTARTS   AGE
es-cluster-0              1/1     Running   0          24h
es-cluster-1              1/1     Running   0          24h
es-cluster-2              1/1     Running   0          24h
fluentd-d49sw             1/1     Running   0          3h30m
fluentd-pkh2l             1/1     Running   0          3h30m
fluentd-qd6f6             1/1     Running   0          3h31m
fluentd-rvdvx             1/1     Running   0          3h30m
kibana-6db5f8d7c8-zxjtf   1/1     Running   0          3h30m
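
To confirm that logs are flowing into Elasticsearch, list the indices (this assumes the port-forward from the earlier health check is still running):

curl "http://localhost:9200/_cat/indices?v"

The Fluentd image used here writes logstash-formatted daily indices (logstash-YYYY.MM.DD) by default, so those should start appearing within a minute or two.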

Test Pod

To generate some log traffic, deploy a test pod that writes one log line per second to stdout:

apiVersion: v1
kind: Pod
metadata:
  name: counter
spec:
  containers:
  - name: count
    image: busybox
    args: [/bin/sh, -c, 'i=0; while true; do echo "Thanks for visiting devopscube! $i"; i=$((i+1)); sleep 1; done']
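
Create the pod and check that it is emitting lines; the file name counter.yaml is an assumption:

kubectl create -f counter.yaml
kubectl logs counter --tail=5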

Kibana Dashboard

Open Kibana at the LoadBalancer endpoint on port 8080, go to Management > Index Patterns, and create an index pattern matching logstash-*. After that, the Discover tab will show the counter pod's "Thanks for visiting devopscube!" messages along with the logs from the rest of the cluster.
