Deploy Kafka cluster (Data Lake stack)
# Allowed values for $ENVIRONMENT variable.
* stg
* qaa
* qab
# Available files containing environment variables.
* STG.env
* QAA.env
* QAB.env
# Set the $ENVIRONMENT variable.
$ export ENVIRONMENT=stg
# Switch to Kubernetes starter-kit directory.
$ cd /opt/kickstart-kubernetes
# Load Kubernetes variables as environment variables using the file that matches your $ENVIRONMENT.
$ source var/STG.env
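# Optionally, confirm the sourced variables are set; DATA_LAKE_KAFKA_VERSION is assumed to be defined in the .env file, since the config-map commands below rely on it.
$ echo ${ENVIRONMENT} ${DATA_LAKE_KAFKA_VERSION}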
# Add labels to Kubernetes nodes.
$ kubectl label nodes sloopstash-k8s-mtr-1 type=on-premise provider=host service=virtualbox region=local availability_zone=local-a
$ kubectl label nodes sloopstash-k8s-wkr-1 type=on-premise provider=host service=virtualbox region=local availability_zone=local-b node-role.kubernetes.io/worker=worker
$ kubectl label nodes sloopstash-k8s-wkr-2 type=on-premise provider=host service=virtualbox region=local availability_zone=local-c node-role.kubernetes.io/worker=worker
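# Optionally, verify the node labels using the kubectl label-columns flag.
$ kubectl get nodes -L type,provider,service,region,availability_zone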
# Create Kubernetes namespace.
$ kubectl create namespace sloopstash-${ENVIRONMENT}-data-lake-s2
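# Optionally, confirm the namespace exists.
$ kubectl get namespace sloopstash-${ENVIRONMENT}-data-lake-s2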
# Create directories for Kubernetes persistent-volumes on worker nodes.
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/0/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/0/log
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/1/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/1/log
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/2/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/controller/2/log
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/0/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/0/log
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/1/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/1/log
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/2/data
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/broker/2/log
$ sudo chmod -R ugo+rwx /mnt/sloopstash
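# Equivalently, the twelve directories above can be created in a single command using Bash brace expansion.
$ sudo mkdir -p /mnt/sloopstash/${ENVIRONMENT}/data-lake/kafka/{controller,broker}/{0,1,2}/{data,log}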
# Create Kubernetes persistent-volumes.
$ envsubst < persistent-volume/data-lake/kafka/controller.yml | kubectl apply -f -
$ envsubst < persistent-volume/data-lake/kafka/broker.yml | kubectl apply -f -
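# Optionally, verify the persistent-volumes were created and are available.
$ kubectl get pv -o wide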
# Create Kubernetes config-maps.
$ kubectl create configmap kafka-controller \
--from-file=workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/conf/ \
--from-file=workload/kafka/${DATA_LAKE_KAFKA_VERSION}/controller/script/ \
--from-file=supervisor-server=workload/supervisor/conf/server.conf \
-n sloopstash-${ENVIRONMENT}-data-lake-s2
$ kubectl create configmap kafka-broker \
--from-file=workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/conf/ \
--from-file=workload/kafka/${DATA_LAKE_KAFKA_VERSION}/broker/script/ \
--from-file=supervisor-server=workload/supervisor/conf/server.conf \
-n sloopstash-${ENVIRONMENT}-data-lake-s2
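# Optionally, inspect the config-maps and their keys.
$ kubectl describe configmap kafka-controller kafka-broker -n sloopstash-${ENVIRONMENT}-data-lake-s2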
# Create Kubernetes service.
$ kubectl apply -f service/data-lake/kafka.yml -n sloopstash-${ENVIRONMENT}-data-lake-s2
# Create Kubernetes stateful-sets.
$ envsubst < stateful-set/data-lake/kafka/controller.yml | kubectl apply -f - -n sloopstash-${ENVIRONMENT}-data-lake-s2
$ envsubst < stateful-set/data-lake/kafka/broker.yml | kubectl apply -f - -n sloopstash-${ENVIRONMENT}-data-lake-s2
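# Optionally, wait for the stateful-sets to finish rolling out before opening shells in the pods; this assumes the stateful-sets are named kafka-controller and kafka-broker, matching the pod names used below.
$ kubectl rollout status statefulset/kafka-controller -n sloopstash-${ENVIRONMENT}-data-lake-s2
$ kubectl rollout status statefulset/kafka-broker -n sloopstash-${ENVIRONMENT}-data-lake-s2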
# Access Bash shell of existing OCI container running Kafka controller node 0.
$ kubectl exec -ti -n sloopstash-${ENVIRONMENT}-data-lake-s2 kafka-controller-0 -c main -- /bin/bash
# Switch to Kafka source directory.
$ cd /usr/local/lib/kafka
# Access Kafka metadata shell.
$ ./bin/kafka-metadata-shell.sh --snapshot /opt/kafka/data/__cluster_metadata-0/00000000000000000000.log
# List Kafka broker nodes.
>> ls brokers
>> exit
# Exit shell.
$ exit
# Access Bash shell of existing OCI container running Kafka broker node 0.
$ kubectl exec -ti -n sloopstash-${ENVIRONMENT}-data-lake-s2 kafka-broker-0 -c main -- /bin/bash
# Switch to Kafka source directory.
$ cd /usr/local/lib/kafka
# Create Kafka topic.
$ ./bin/kafka-topics.sh --create --topic sloopengine-product-update --if-not-exists --partitions 3 --replication-factor 2 --bootstrap-server 0.0.0.0:9092
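# Optionally, describe the topic to confirm its partition and replica assignment across the brokers.
$ ./bin/kafka-topics.sh --describe --topic sloopengine-product-update --bootstrap-server 0.0.0.0:9092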
# Exit shell.
$ exit
# Access Bash shell of existing OCI container running Kafka broker node 1.
$ kubectl exec -ti -n sloopstash-${ENVIRONMENT}-data-lake-s2 kafka-broker-1 -c main -- /bin/bash
# Switch to Kafka source directory.
$ cd /usr/local/lib/kafka
# Write or publish message to Kafka topic.
$ ./bin/kafka-console-producer.sh --topic sloopengine-product-update --bootstrap-server 0.0.0.0:9092
> SloopEngine IDE v2.1.4 has been released.
> SloopEngine IDE protects your source code from developers.
# Exit shell.
$ exit
# Access Bash shell of existing OCI container running Kafka broker node 2.
$ kubectl exec -ti -n sloopstash-${ENVIRONMENT}-data-lake-s2 kafka-broker-2 -c main -- /bin/bash
# Switch to Kafka source directory.
$ cd /usr/local/lib/kafka
# Read or stream message from Kafka topic.
$ ./bin/kafka-console-consumer.sh --topic sloopengine-product-update --from-beginning --bootstrap-server 0.0.0.0:9092
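# The console consumer streams until interrupted with Ctrl+C; to read a fixed number of messages and exit automatically, pass --max-messages.
$ ./bin/kafka-console-consumer.sh --topic sloopengine-product-update --from-beginning --max-messages 2 --bootstrap-server 0.0.0.0:9092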
# Exit shell.
$ exit
# Switch to Kubernetes starter-kit directory.
$ cd /opt/kickstart-kubernetes
# List resources under Kubernetes namespace.
$ kubectl get pvc,cm,sts,deploy,rs,ds,po,svc,ep,ing -o wide -n sloopstash-${ENVIRONMENT}-data-lake-s2
# Delete Kubernetes namespace.
$ kubectl delete namespace sloopstash-${ENVIRONMENT}-data-lake-s2
# Delete Kubernetes persistent-volumes using templates.
$ envsubst < persistent-volume/data-lake/kafka/controller.yml | kubectl delete -f -
$ envsubst < persistent-volume/data-lake/kafka/broker.yml | kubectl delete -f -
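# Deleting the persistent-volumes does not remove data already written under /mnt/sloopstash; if no longer needed, clean it up manually on each worker node.
$ sudo rm -rf /mnt/sloopstash/${ENVIRONMENT}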