
Deploying a Kafka Cluster with docker-compose

1. Preparation

1. Prepare three servers

 192.168.2.100
192.168.2.102
192.168.2.103  
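The three hosts must be able to reach each other on the ZooKeeper ports (2181, 2888, 3888) and the Kafka port (9092). A minimal sketch for opening them, assuming the hosts run firewalld (skip this if the firewall is disabled):

 $ firewall-cmd --permanent --add-port={2181,2888,3888,9092}/tcp
$ firewall-cmd --reload  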

2. Install Docker and docker-compose
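A quick check that both tools are installed and on the PATH:

 $ docker -v
$ docker-compose -v  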

3. Create the directories

 $ mkdir -p /data/zookeeper && mkdir -p /data/kafka && cd /data/zookeeper  

2. Set up the ZooKeeper cluster

  • 192.168.2.100
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo1:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=192.168.2.102:2888:3888;2181 server.3=192.168.2.103:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
EOF  
  • 192.168.2.102
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo2:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo2
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=192.168.2.100:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=192.168.2.103:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
EOF  
  • 192.168.2.103
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo3:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo3
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=192.168.2.100:2888:3888;2181 server.2=192.168.2.102:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
EOF  
  • Run on each node
 $ docker-compose -f docker-compose.yml up -d 
$ docker ps |grep zook  
  • To check the status of a ZooKeeper node, exec into its container. Example on 192.168.2.100:
 $ docker exec -it zookeeper_zoo1_1 bash
root@zoo1:/apache-zookeeper-3.7.0-bin# cd bin/ && zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower  
  • Check the other nodes the same way:
 $ docker exec -it zookeeper_zoo2_1 bash
root@zoo2:/apache-zookeeper-3.7.0-bin# cd bin/ && zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: leader  
 $ docker exec -it zookeeper_zoo3_1 bash
root@zoo3:/apache-zookeeper-3.7.0-bin# cd bin/ && zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /conf/zoo.cfg
Client port found: 2181. Client address: localhost. Client SSL: false.
Mode: follower
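Optionally, verify that writes replicate across the quorum by creating a znode on one node and reading it from another. A minimal sketch, assuming zkCli.sh is on the container's PATH (it is in the official zookeeper image); the znode path /test and its value are arbitrary:

 $ docker exec -it zookeeper_zoo1_1 zkCli.sh -server localhost:2181 create /test hello
$ docker exec -it zookeeper_zoo2_1 zkCli.sh -server localhost:2181 get /test  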

3. Set up the Kafka cluster

 $ cd /data/kafka  
  • 192.168.2.100
 cat >docker-compose.yml<<EOF
version: '2'
services:
  kafka1:
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.2.100                    ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.100:9092    ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: 192.168.2.100:2181,192.168.2.102:2181,192.168.2.103:2181 # IPs and ports of the ZooKeeper cluster installed above
      KAFKA_ADVERTISED_PORT: 9092
      TZ: Asia/Shanghai
      JVM_XMS: 2g
      JVM_XMX: 2g
    container_name: kafka1
EOF  
  • 192.168.2.102
 cat >docker-compose.yml<<EOF
version: '2'
services:
  kafka2:
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.2.102                    ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.102:9092    ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: 192.168.2.100:2181,192.168.2.102:2181,192.168.2.103:2181 # IPs and ports of the ZooKeeper cluster installed above
      KAFKA_ADVERTISED_PORT: 9092
      TZ: Asia/Shanghai
      JVM_XMS: 2g
      JVM_XMX: 2g
    container_name: kafka2
EOF  
  • 192.168.2.103
 cat >docker-compose.yml<<EOF
version: '2'
services:
  kafka3:
    image: wurstmeister/kafka:2.13-2.7.0
    ports:
      - "9092:9092"
    environment:
      KAFKA_ADVERTISED_HOST_NAME: 192.168.2.103                    ## change: host IP
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://192.168.2.103:9092    ## change: host IP
      KAFKA_ZOOKEEPER_CONNECT: 192.168.2.100:2181,192.168.2.102:2181,192.168.2.103:2181 # IPs and ports of the ZooKeeper cluster installed above
      KAFKA_ADVERTISED_PORT: 9092
      TZ: Asia/Shanghai
      JVM_XMS: 2g
      JVM_XMX: 2g
    container_name: kafka3
EOF  

Run on each node

 $ docker-compose -f docker-compose.yml up -d  
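A quick smoke test once all three brokers are up (run on 192.168.2.100): create a replicated topic and describe it. The topic name test is arbitrary, and this assumes kafka-topics.sh is on the PATH inside the wurstmeister image:

 $ docker exec -it kafka1 kafka-topics.sh --create --bootstrap-server 192.168.2.100:9092 --replication-factor 3 --partitions 3 --topic test
$ docker exec -it kafka1 kafka-topics.sh --describe --bootstrap-server 192.168.2.100:9092 --topic test  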

4、kafka-manager

  • 192.168.2.100
 cat >kafka-manager.yml<<EOF

version: '2'
services:
  kafka-manager:
    image: sheepkiller/kafka-manager              ## image: open-source web UI for managing Kafka clusters
    environment:
        ZK_HOSTS: 192.168.2.100                 ## change: host IP
    ports:
      - "9100:9000"                               ## exposed port
EOF  
 $ docker-compose -f kafka-manager.yml up -d  

Open the web UI in a browser at http://192.168.2.100:9100 (the host IP plus the 9100 port mapped above).

5. Monitoring ZooKeeper

Add configuration to enable the four-letter-word (4lw) command whitelist, which the monitoring below (e.g. mntr) relies on.

Method 1

1. Example on 192.168.2.100

 $ docker exec -it zookeeper_zoo1_1 bash
root@zoo1:/apache-zookeeper-3.7.0-bin# cd ..
root@zoo1:/# cd conf/
root@zoo1:/conf# echo '4lw.commands.whitelist=*' >> zoo.cfg  
 $ docker restart zookeeper_zoo1_1  

2. Example on 192.168.2.102

 $ docker exec -it zookeeper_zoo2_1 bash
root@zoo2:/apache-zookeeper-3.7.0-bin# cd ..
root@zoo2:/# cd conf/
root@zoo2:/conf# echo '4lw.commands.whitelist=*' >> zoo.cfg  
 $ docker restart zookeeper_zoo2_1  

3. Example on 192.168.2.103

 $ docker exec -it zookeeper_zoo3_1 bash
root@zoo3:/apache-zookeeper-3.7.0-bin# cd ..
root@zoo3:/# cd conf/
root@zoo3:/conf# echo '4lw.commands.whitelist=*' >> zoo.cfg  
 $ docker restart zookeeper_zoo3_1  

Method 2

1. Example on 192.168.2.100

 $ mkdir /data/zookeeper/conf/ -p  
 cat >conf/zoo.cfg<<EOF
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
server.1=0.0.0.0:2888:3888;2181
server.2=192.168.2.102:2888:3888;2181
server.3=192.168.2.103:2888:3888;2181
4lw.commands.whitelist=*
EOF  
 $ chown -R 1000:1000 conf/  
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo1:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo1
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888;2181 server.2=192.168.2.102:2888:3888;2181 server.3=192.168.2.103:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
      - /data/zookeeper/conf/zoo.cfg:/conf/zoo.cfg
EOF  
 $ docker-compose -f docker-compose.yml down
$ docker-compose -f docker-compose.yml up -d  
  • Test
 $ echo mntr |nc 192.168.2.100 2181  

2. Example on 192.168.2.102

 $ mkdir /data/zookeeper/conf/ -p  
 cat >conf/zoo.cfg<<EOF
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
server.1=192.168.2.100:2888:3888;2181
server.2=0.0.0.0:2888:3888;2181
server.3=192.168.2.103:2888:3888;2181
4lw.commands.whitelist=*
EOF  
 $ chown -R 1000:1000 conf/  
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo2:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo2
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=192.168.2.100:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=192.168.2.103:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
      - /data/zookeeper/conf/zoo.cfg:/conf/zoo.cfg
EOF  
 $ docker-compose -f docker-compose.yml down
$ docker-compose -f docker-compose.yml up -d  
  • Test
 $ echo mntr |nc 192.168.2.102 2181  

3. Example on 192.168.2.103

 $ mkdir /data/zookeeper/conf/ -p  
 cat >conf/zoo.cfg<<EOF
dataDir=/data
dataLogDir=/datalog
tickTime=2000
initLimit=5
syncLimit=2
autopurge.snapRetainCount=3
autopurge.purgeInterval=0
maxClientCnxns=60
standaloneEnabled=true
admin.enableServer=true
server.1=192.168.2.100:2888:3888;2181
server.2=192.168.2.102:2888:3888;2181
server.3=0.0.0.0:2888:3888;2181
4lw.commands.whitelist=*
EOF  
 $ chown -R 1000:1000 conf/  
 cat >docker-compose.yml<<EOF
version: '3.3'
services:
  zoo3:
    image: zookeeper:3.7.0
    restart: always
    hostname: zoo3
    ports:
      - 2181:2181
      - 2888:2888
      - 3888:3888
    environment:
      TZ: Asia/Shanghai
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=192.168.2.100:2888:3888;2181 server.2=192.168.2.102:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181
      ZOO_HEAP_SIZE: 2048
    volumes:
      - /data/zookeeper/data/:/data/
      - /data/zookeeper/conf/zoo.cfg:/conf/zoo.cfg
EOF  
 $ docker-compose -f docker-compose.yml down
$ docker-compose -f docker-compose.yml up -d  
  • Test
 $ echo mntr |nc 192.168.2.103 2181  

ZooKeeper exporter

 cat zk-exporter-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: zookeeper-exporter
  namespace: monitoring  
 cat zk-exporter-serviceMonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: zookeeper-exporter
  name: zookeeper-exporter
  namespace: monitoring
spec:
  endpoints:
  - interval: 30s
    port: http
  jobLabel: k8s-app
  selector:
    matchLabels:
      k8s-app: zookeeper-exporter  
 cat zk-exporter-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: zookeeper-exporter
  name: zookeeper-exporter
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - name: http
    port: 8080
    nodePort: 30021
    targetPort: http
  selector:
    k8s-app: zookeeper-exporter  
 cat zk-exporter.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: zookeeper-exporter
  namespace: monitoring
  labels:
    k8s-app: zookeeper-exporter
spec:
  selector:
    matchLabels:
      k8s-app: zookeeper-exporter
  template:
    metadata:
      labels:
        k8s-app: zookeeper-exporter
    spec:
      containers:
      - name: zookeeper-exporter
        image: bitnami/zookeeper-exporter:latest
        args: ["-zk-list","192.168.2.100:2181,192.168.2.102:2181,192.168.2.103:2181"]
        ports:
        - containerPort: 8080
          name: http  
 kubectl create -f .  
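After kubectl create, it is worth confirming the exporter pod is running and serving metrics. A minimal check, assuming the NodePort 30021 from the Service above is reachable on a cluster node (192.168.2.101 is used here only because it is the node IP referenced in the Prometheus target below):

 $ kubectl -n monitoring get pods,svc -l k8s-app=zookeeper-exporter
$ curl -s http://192.168.2.101:30021/metrics | head  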

Prometheus configuration

Add the following scrape job:

    - job_name: 'zookeeper'
      static_configs:
      - targets: ['192.168.2.101:30021']  
 $ kubectl apply -f prometheus-configmap.yaml
$ curl -X POST 10.244.2.59:9090/-/reload  

Grafana dashboard

 11442  # Grafana dashboard ID

Adjust the panels to your needs after importing.

Issues

1. Prometheus monitoring reports an error on the ZooKeeper targets

Solution:

Stop all of the ZooKeeper nodes, wait until their targets disappear from Prometheus, then start all of the ZooKeeper nodes again.
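In practice that means running, on each of the three ZooKeeper hosts:

 $ cd /data/zookeeper && docker-compose -f docker-compose.yml down
# wait until the zookeeper targets have disappeared from the Prometheus targets page, then:
$ docker-compose -f docker-compose.yml up -d  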

6. Monitoring Kafka

 cat kafka-exporter-account.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kafka-exporter
  namespace: monitoring  
 cat kafka-exporter-service.yaml
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: kafka-exporter
  name: kafka-exporter
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - name: http
    port: 9308
    nodePort: 30020
    targetPort: http
  selector:
    k8s-app: kafka-exporter  
 cat kafka-exporter-serviceMonitor.yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  labels:
    k8s-app: kafka-exporter
  name: kafka-exporter
  namespace: monitoring
spec:
  endpoints:
  - interval: 30s
    port: http
  jobLabel: k8s-app
  selector:
    matchLabels:
      k8s-app: kafka-exporter  
 cat kafka-exporter.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: kafka-exporter
  namespace: monitoring
  labels:
    k8s-app: kafka-exporter
spec:
  selector:
    matchLabels:
      k8s-app: kafka-exporter
  template:
    metadata:
      labels:
        k8s-app: kafka-exporter
    spec:
      containers:
      - name: kafka-exporter
        image: bitnami/kafka-exporter:latest
        args: ["--kafka.server","192.168.2.100:9092"]
        ports:
        - containerPort: 9308
          name: http  
 kubectl create -f .  
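As with the ZooKeeper exporter, a quick check that it is up and serving metrics; 192.168.2.101:30020 is the NodePort target used in the Prometheus job below, and the kafka_brokers gauge exposed by the exporter should report the number of brokers in the cluster:

 $ kubectl -n monitoring get pods -l k8s-app=kafka-exporter
$ curl -s http://192.168.2.101:30020/metrics | grep kafka_brokers  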
Add the following scrape job to the Prometheus configuration:

    - job_name: 'kafka'
      static_configs:
      - targets: ['192.168.2.101:30020']  
 kubectl apply -f prometheus-configmap.yaml
curl -X POST 10.244.2.59:9090/-/reload  
 12460  # Grafana dashboard ID
