
k8s nifi-1.13.2 zookeeper-3.6.3

by kyeongseo.oh 2022. 10. 2.

Run a ZooKeeper cluster and a NiFi cluster on k8s.

 

version

name       version
zookeeper  3.6.3
nifi       1.13.2

 

The stock Apache NiFi image does not let you change the location of the flow.xml.gz file or of the individual repositories, so a modified Docker image is used instead.

NiFi image Dockerfile: https://github.com/kyeongseooh/k8s-nifi-1.13.2
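
The actual Dockerfile lives in the repository above. Conceptually it only needs to extend the stock image and overlay a start script that maps a few extra environment variables onto nifi.properties. A minimal sketch, assuming the prop_replace helper and script paths used by the official image (check the repo for the real version):

# Sketch only -- see the linked repository for the real Dockerfile.
FROM apache/nifi:1.13.2

# Overlay a start.sh that, in addition to the stock logic, runs e.g.:
#   prop_replace nifi.flow.configuration.file              "${NIFI_FLOW_CONFIGURATION_FILE}"
#   prop_replace nifi.flowfile.repository.directory        "${NIFI_FLOWFILE_REPOSITORY_DIRECTORY}"
#   prop_replace nifi.content.repository.directory.default "${NIFI_CONTENT_REPOSITORY_DIRECTORY_DEFAULT}"
# (prop_replace is the sed wrapper shipped in /opt/nifi/scripts/common.sh)
COPY --chown=nifi:nifi start.sh /opt/nifi/scripts/start.sh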

 

zookeeper.yaml

apiVersion: v1
kind: Service
metadata:
  name: zk-hs
  labels:
    app: zk
spec:
  ports:
  - port: 2888
    name: server
  - port: 3888
    name: leader-election
  clusterIP: None
  selector:
    app: zk
---
apiVersion: v1
kind: Service
metadata:
  name: zk-cs
  labels:
    app: zk
spec:
  ports:
  - port: 2181
    name: client
  selector:
    app: zk
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: zk-pdb
spec:
  selector:
    matchLabels:
      app: zk
  maxUnavailable: 1
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: zk
spec:
  selector:
    matchLabels:
      app: zk
  serviceName: zk-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: Parallel
  template:
    metadata:
      labels:
        app: zk
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - zk
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: kubernetes-zookeeper
        imagePullPolicy: IfNotPresent
        image: "ghcr.io/nirmata/kubernetes-zookeeper:v3-zk3.6.3"
        resources:
          requests:
            memory: "1Gi"
            cpu: "0.5"
        ports:
        - containerPort: 2181
          name: client
        - containerPort: 2888
          name: server
        - containerPort: 3888
          name: leader-election
        command:
        - sh
        - -c
        - "start-zookeeper \
          --servers=3 \
          --data_dir=/var/lib/zookeeper/data \
          --data_log_dir=/var/lib/zookeeper/data/log \
          --conf_dir=/opt/zookeeper/conf \
          --client_port=2181 \
          --election_port=3888 \
          --server_port=2888 \
          --tick_time=2000 \
          --init_limit=10 \
          --sync_limit=5 \
          --heap=512M \
          --max_client_cnxns=60 \
          --snap_retain_count=3 \
          --purge_interval=12 \
          --max_session_timeout=40000 \
          --min_session_timeout=4000 \
          --log_level=INFO"
        readinessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        livenessProbe:
          exec:
            command:
            - sh
            - -c
            - "zookeeper-ready 2181"
          initialDelaySeconds: 10
          timeoutSeconds: 5
        volumeMounts:
        - name: datadir
          mountPath: /var/lib/zookeeper
      securityContext:
        runAsUser: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: datadir
    spec:
      accessModes: ["ReadWriteMany"]
      resources:
        requests:
          storage: 2Gi
      storageClassName: "nfs-client"
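
Apply the manifest and wait until all three pods are Ready:

kubectl apply -f zookeeper.yaml
kubectl get pods -l app=zk -w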

 

Verify that ZooKeeper is working

A leader and two followers come up normally:

[root@km ~]# for i in {0..2} ; do kubectl exec zk-${i} -it -- zkServer.sh status ; done
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
ZooKeeper JMX enabled by default
Using config: /usr/bin/../etc/zookeeper/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader
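
NiFi will reach each ZooKeeper pod by its per-pod DNS name through the zk-hs headless service, so it is worth confirming that those records resolve. A quick check (the busybox image is just an example):

kubectl run -it --rm dns-test --image=busybox:1.36 --restart=Never -- \
  nslookup zk-0.zk-hs.default.svc.cluster.local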

 

nifi.yaml

apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nifi
spec:
  selector:
    matchLabels:
      app: nifi
  serviceName: nifi-hs
  replicas: 3
  updateStrategy:
    type: RollingUpdate
  podManagementPolicy: OrderedReady
  template:
    metadata:
      labels:
        app: nifi
    spec:
      affinity:
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: "app"
                    operator: In
                    values:
                    - nifi
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: nifi
        image: "oks529/nifi:1.13.2"
        env:
        - name: NIFI_CLUSTER_IS_NODE
          value: "true"
        - name: HOSTNAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: NIFI_CLUSTER_ADDRESS
          value: $(HOSTNAME).nifi-hs.default.svc.cluster.local
        - name: NIFI_CLUSTER_NODE_PROTOCOL_PORT
          value: "1025"
        - name: NIFI_WEB_HTTP_HOST
          value: $(HOSTNAME).nifi-hs.default.svc.cluster.local
        #- name: NIFI_WEB_HTTP_PORT
        #  value: "80"
        - name: NIFI_CLUSTER_NODE_PROTOCOL_MAX_THREADS
          value: "100"
        - name: NIFI_ZK_CONNECT_STRING
          value: "zk-0.zk-hs.default.svc.cluster.local:2181,zk-1.zk-hs.default.svc.cluster.local:2181,zk-2.zk-hs.default.svc.cluster.local:2181"
        - name: NIFI_ELECTION_MAX_CANDIDATES
          value: "3"
        - name: NIFI_FLOW_CONFIGURATION_FILE
          value: "/data/flow.xml.gz"
        - name: NIFI_DATABASE_DIRECTORY
          value: "/data/database_repository"
        - name: NIFI_FLOWFILE_REPOSITORY_DIRECTORY
          value: "/data/flowfile_repository"
        - name: NIFI_CONTENT_REPOSITORY_DIRECTORY_DEFAULT
          value: "/data/content_repository"
        - name: NIFI_PROVENANCE_REPOSITORY_DIRECTORY_DEFAULT
          value: "/data/provenance_repository"
        ports:
        - containerPort: 8080
          name: client
        - containerPort: 1025
          name: nodeport
        volumeMounts:
        - name: nifi-data
          mountPath: /data
      securityContext:
        runAsUser: 0
        runAsGroup: 1000
        fsGroup: 1000
  volumeClaimTemplates:
  - metadata:
      name: nifi-data
    spec:
      accessModes: ["ReadWriteMany"]
      resources:
        requests:
          storage: 5Gi
      storageClassName: "nfs-client"
---
kind: Service
apiVersion: v1
metadata:
  name: nifi
  namespace: default
  labels:
    app: nifi
spec:
  type: NodePort
  selector:
    app: nifi
  ports:
    - protocol: TCP
      port: 8080
      name: nifi
---
kind: Service
apiVersion: v1
metadata:
  name: nifi-hs
spec:
  clusterIP: None
  selector:
    app: nifi
  ports:
    - port: 8080
      targetPort: 8080
      name: web
    - port: 1025
      targetPort: 1025
      name: cluster-protocol
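
Deploy NiFi the same way. With podManagementPolicy: OrderedReady the pods start one at a time, so the rollout takes a few minutes:

kubectl apply -f nifi.yaml
kubectl rollout status statefulset/nifi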

 

Verify the NiFi cluster
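
The cluster state is visible in the UI (hamburger menu → Cluster), or it can be queried over the REST API. A quick sketch, assuming the unsecured HTTP endpoint and the NodePort shown further below:

curl -s http://<node-ip>:30511/nifi-api/controller/cluster

Each node should report a status of CONNECTED.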

 

Check the PV

Specific directories and files are stored on the PV so they can be reused for NiFi version upgrades and failure recovery.

[root@km default-nifi-data-nifi-0-pvc-3d0a6b4f-43bf-44a3-ba9b-1040afcf09d3]# ll
total 48
drwxr-xr-x. 1026 root ddadmin 20480 Oct  2 18:13 content_repository
drwxr-xr-x.    2 root ddadmin   100 Oct  2 18:13 database_repository
drwxr-xr-x.    3 root ddadmin    40 Oct  2 18:14 flowfile_repository
-rw-r--r--.    1 root ddadmin   340 Oct  2 18:14 flow.xml.gz
drwxr-xr-x.    2 root ddadmin     6 Oct  2 18:13 provenance_repository
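
Since all of this state sits on the NFS-backed PV, a pre-upgrade backup can be as simple as scaling the StatefulSet down and archiving the directory (paths are illustrative; substitute your NFS export root):

kubectl scale statefulset nifi --replicas=0
tar czf nifi-0-backup.tar.gz \
  -C <nfs-root>/default-nifi-data-nifi-0-pvc-3d0a6b4f-43bf-44a3-ba9b-1040afcf09d3 .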

 

Verify failure recovery

Access the UI through NodePort 30511 (ip:30511/nifi).

[root@km nfs]# kubectl get svc
NAME         TYPE        CLUSTER-IP       EXTERNAL-IP   PORT(S)                      AGE
kubernetes   ClusterIP   10.96.0.1        <none>        443/TCP                      3d21h
nifi         NodePort    10.108.213.205   <none>        8080:30511/TCP               22m

Create a test processor.
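
In the post this is done in the UI; the same thing can also be scripted against the REST API. A sketch, assuming jq is installed and the endpoint is unsecured (the processor type here is just an example):

NODE=<node-ip>:30511
# Look up the root process group id, then drop a GenerateFlowFile processor into it
ROOT_ID=$(curl -s "http://$NODE/nifi-api/flow/process-groups/root" | jq -r '.processGroupFlow.id')
curl -s -X POST "http://$NODE/nifi-api/process-groups/$ROOT_ID/processors" \
  -H 'Content-Type: application/json' \
  -d '{"revision":{"version":0},"component":{"type":"org.apache.nifi.processors.standard.GenerateFlowFile","position":{"x":0,"y":0}}}'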

 

To simulate a failure, tear the whole NiFi cluster down and recreate it:

kubectl delete -f nifi.yaml
kubectl apply -f nifi.yaml

 

Confirm that the flow is restored intact.
