Deploying a Kafka Cluster with Docker
1. Building and deploying your own image
1.1 Deploying single-node Kafka with Docker
Deployment files
Directory layout:
./
├── dockerfile
├── kafka_2.12-2.8.1
├── run.sh
└── sources.list
sources.list (Aliyun mirror)
deb http://mirrors.aliyun.com/ubuntu/ xenial main restricted
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates main restricted
deb http://mirrors.aliyun.com/ubuntu/ xenial universe
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates universe
deb http://mirrors.aliyun.com/ubuntu/ xenial multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-updates multiverse
deb http://mirrors.aliyun.com/ubuntu/ xenial-backports main restricted universe multiverse
deb http://mirrors.aliyun.com/ubuntu xenial-security main restricted
deb http://mirrors.aliyun.com/ubuntu xenial-security universe
deb http://mirrors.aliyun.com/ubuntu xenial-security multiverse
dockerfile
FROM ubuntu:16.04
# Switch the apt sources to the Aliyun mirror
COPY sources.list /etc/apt/sources.list
COPY kafka_2.12-2.8.1 /kafka_2.12-2.8.1
# Install the JDK
RUN apt-get update && apt-get install -y openjdk-8-jdk --allow-unauthenticated && apt-get clean all
# EXPOSE only documents the container ports; the host mapping (8001->9092, 8002->2181) is done with -p at run time
EXPOSE 9092
EXPOSE 2181
# Add the startup script
ADD run.sh .
RUN chmod 755 run.sh
ENTRYPOINT [ "/run.sh" ]
run.sh
#!/bin/bash
# Start the bundled ZooKeeper
cd /kafka_2.12-2.8.1
bin/zookeeper-server-start.sh config/zookeeper.properties &
# Start Kafka
sleep 3
bin/kafka-server-start.sh config/server.properties &
# Keep a foreground process so the container does not exit
top
Start the container
$ cd ./kafka_server_test
$ docker build -t kafka_server_test .
$ docker run -d -it -p 8001:9092 -p 8002:2181 kafka_server_test
$ docker exec -it 1bda5e6 bash
# now inside the container
$ jps
821 Jps
7 QuorumPeerMain
382 Kafka
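To sanity-check the single-node broker, you can create a topic and produce/consume one message from inside the container. This is only a sketch: the topic name quickstart-test is arbitrary, and the paths assume the Kafka 2.8.1 layout used above; the consumer should print the produced message back.
$ cd /kafka_2.12-2.8.1
$ bin/kafka-topics.sh --bootstrap-server localhost:9092 --create --replication-factor 1 --partitions 1 --topic quickstart-test
$ echo "hello" | bin/kafka-console-producer.sh --bootstrap-server localhost:9092 --topic quickstart-test
$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic quickstart-test --from-beginning --max-messages 1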
1.2 Deploying a Kafka cluster with Docker
Directory layout
./mycluster
├── zookeeper-cluster
│   ├── dockerfile
│   ├── kafka_2.12-2.8.1 (uses the ZooKeeper bundled with Kafka)
│   ├── zookeeper-cluster.properties
│   ├── run.sh
│   ├── sources.list
│   └── docker-compose-zookeeper.yml
└── broker-cluster
    ├── dockerfile
    ├── kafka_2.12-2.8.1
    ├── broker-cluster.properties
    ├── run.sh
    ├── sources.list
    └── docker-compose-kafka.yml
Create the network mykafka-subnet
$ docker network create --subnet 171.168.0.0/16 --gateway 171.168.0.1 mykafka-subnet
| hostname | ip | port |
|---|---|---|
| gateway | 171.168.0.1 | - |
| zoo1 | 171.168.1.1 | 2184:2181 |
| zoo2 | 171.168.1.2 | 2185:2181 |
| zoo3 | 171.168.1.3 | 2186:2181 |
| kafka1 | 171.168.2.1 | 9092:9092 |
| kafka2 | 171.168.2.2 | 9093:9093 |
| kafka3 | 171.168.2.3 | 9094:9094 |
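Before starting any containers you can confirm the network exists and shows the expected subnet and gateway:
$ docker network inspect mykafka-subnet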
ZooKeeper cluster
zookeeper-cluster.properties
tickTime=2000
dataDir=/tmp/zookeeper
clientPort=2181
initLimit=5
syncLimit=2
run.sh
#!/bin/bash
cd /kafka_2.12-2.8.1
# -------------- 1 Cluster setup ---------------------
# -------------- 1.1 Set the cluster addresses ---------------------
string=${ZOO_SERVERS}
echo " args: ${string}"
echo "---------------------------------------"
# $IFS is the shell's default field separator (space, tab, newline)
# Save the default $IFS first
BAK_IFS=$IFS
# Set the separator
IFS=" "
# Split the string on the separator
arr=($string)
IFS=$BAK_IFS
echo $arr
echo "------------------------------------"
# Append each server entry to the cluster config (and echo it)
for val in ${arr[@]};
do
echo $val >> ./config/zookeeper-cluster.properties
echo $val
done
# -------------- 1.2 Set the ZooKeeper node id ------------------
mkdir -p /tmp/zookeeper
touch /tmp/zookeeper/myid
echo "${ZOO_MY_ID}" > /tmp/zookeeper/myid
echo "---------------cat --------------------"
cat ./config/zookeeper-cluster.properties
echo "---------------cat --------------------"
# -------------- 2 Start ZooKeeper ---------------------
bin/zookeeper-server-start.sh /kafka_2.12-2.8.1/config/zookeeper-cluster.properties
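For reference, with ZOO_MY_ID=1 and the ZOO_SERVERS value from docker-compose-zookeeper.yml below, the script should leave the container in roughly this state (expected contents derived from the appends above, not captured output):
$ cat /tmp/zookeeper/myid
1
$ cat /kafka_2.12-2.8.1/config/zookeeper-cluster.properties
tickTime=2000
dataDir=/tmp/zookeeper
clientPort=2181
initLimit=5
syncLimit=2
server.1=0.0.0.0:2888:3888
server.2=zoo2:2888:3888
server.3=zoo3:2888:3888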
dockerfile
FROM ubuntu:16.04
# Switch the apt sources to the Aliyun mirror
COPY sources.list /etc/apt/sources.list
COPY ./kafka_2.12-2.8.1 /kafka_2.12-2.8.1
COPY ./zookeeper-cluster.properties /kafka_2.12-2.8.1/config/zookeeper-cluster.properties
# Install the JDK
RUN apt-get update && apt-get install -y openjdk-8-jdk --allow-unauthenticated && apt-get clean all
# Add the startup script
ADD run.sh .
RUN chmod 755 run.sh
ENTRYPOINT [ "/run.sh" ]
docker-compose-zookeeper.yml
version: '3.4'
services:
  zoo1:
    build: .
    restart: always
    hostname: zoo1
    container_name: zoo1
    ports:
      - 2184:2181
    volumes:
      - "/Users/loserwang/work/zk/data:/data"
      - "/Users/loserwang/work/zk/datalog:/datalog"
    environment:
      ZOO_MY_ID: 1
      ZOO_SERVERS: server.1=0.0.0.0:2888:3888 server.2=zoo2:2888:3888 server.3=zoo3:2888:3888
    networks:
      kafka:
        ipv4_address: 171.168.1.1
  zoo2:
    build: .
    restart: always
    hostname: zoo2
    container_name: zoo2
    ports:
      - 2185:2181
    volumes:
      - "/Users/loserwang/work/zk/data:/data"
      - "/Users/loserwang/work/zk/datalog:/datalog"
    environment:
      ZOO_MY_ID: 2
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=0.0.0.0:2888:3888 server.3=zoo3:2888:3888
    networks:
      kafka:
        ipv4_address: 171.168.1.2
  zoo3:
    build: .
    restart: always
    hostname: zoo3
    container_name: zoo3
    ports:
      - 2186:2181
    volumes:
      - "/Users/loserwang/work/zk/data:/data"
      - "/Users/loserwang/work/zk/datalog:/datalog"
    environment:
      ZOO_MY_ID: 3
      ZOO_SERVERS: server.1=zoo1:2888:3888 server.2=zoo2:2888:3888 server.3=0.0.0.0:2888:3888
    networks:
      kafka:
        ipv4_address: 171.168.1.3
# Use the pre-existing network
networks:
  kafka:
    external:
      name: mykafka-subnet
Start the containers
$ docker-compose -f docker-compose-zookeeper.yml up -d
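To check that the ensemble has formed, you can query each node with the zookeeper-shell.sh bundled in the image and look for the elected roles in the logs (a rough sketch; log wording can differ between ZooKeeper versions). Repeat for zoo2 and zoo3.
$ docker exec zoo1 /kafka_2.12-2.8.1/bin/zookeeper-shell.sh localhost:2181 ls /
$ docker logs zoo1 2>&1 | grep -iE "LEADING|FOLLOWING"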
Kafka cluster
broker-cluster.properties
log.dirs=/kafka/logs
run.sh
#!/bin/bash
cd /kafka_2.12-2.8.1
# Append the cluster settings for this broker
echo "broker.id=${KAFKA_BROKER_ID}" >> ./config/broker-cluster.properties
echo "zookeeper.connect=${KAFKA_ZOOKEEPER_CONNECT}" >> ./config/broker-cluster.properties
echo "listeners=${KAFKA_LISTENERS}" >> ./config/broker-cluster.properties
# Start Kafka
bin/kafka-server-start.sh config/broker-cluster.properties
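For broker kafka1, with the environment variables set in docker-compose-kafka.yml below, the appended lines should leave broker-cluster.properties looking roughly like this (an expectation derived from the script, not captured output):
$ cat /kafka_2.12-2.8.1/config/broker-cluster.properties
log.dirs=/kafka/logs
broker.id=1
zookeeper.connect=zoo1:2181,zoo2:2181,zoo3:2181
listeners=PLAINTEXT://kafka1:9092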
dockerfile
FROM ubuntu:16.04
# Switch the apt sources to the Aliyun mirror
COPY sources.list /etc/apt/sources.list
COPY ./kafka_2.12-2.8.1 /kafka_2.12-2.8.1
COPY ./broker-cluster.properties /kafka_2.12-2.8.1/config/broker-cluster.properties
# Install the JDK
RUN apt-get update && apt-get install -y openjdk-8-jdk --allow-unauthenticated && apt-get clean all
# Add the startup script
ADD run.sh .
RUN chmod 755 run.sh
ENTRYPOINT [ "/run.sh" ]
docker-compose-kafka.yml
version: '3.4'
services:
  kafka1:
    build: .
    restart: always
    hostname: kafka1
    container_name: kafka1
    privileged: true
    ports:
      - 9092:9092
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_LISTENERS: PLAINTEXT://kafka1:9092
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /Users/loserwang/work/zk/logs:/kafka
    networks:
      kafka:
        ipv4_address: 171.168.2.1
    extra_hosts:
      zoo1: 171.168.1.1
      zoo2: 171.168.1.2
      zoo3: 171.168.1.3
  kafka2:
    build: .
    restart: always
    hostname: kafka2
    container_name: kafka2
    privileged: true
    ports:
      - 9093:9093
    environment:
      KAFKA_BROKER_ID: 2
      KAFKA_LISTENERS: PLAINTEXT://kafka2:9093
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /Users/loserwang/work/zk/logs:/kafka
    networks:
      kafka:
        ipv4_address: 171.168.2.2
    extra_hosts:
      zoo1: 171.168.1.1
      zoo2: 171.168.1.2
      zoo3: 171.168.1.3
  kafka3:
    build: .
    restart: always
    hostname: kafka3
    container_name: kafka3
    privileged: true
    ports:
      - 9094:9094
    environment:
      KAFKA_BROKER_ID: 3
      KAFKA_LISTENERS: PLAINTEXT://kafka3:9094
      KAFKA_ZOOKEEPER_CONNECT: zoo1:2181,zoo2:2181,zoo3:2181
    volumes:
      - /Users/loserwang/work/zk/logs:/kafka
    networks:
      kafka:
        ipv4_address: 171.168.2.3
    extra_hosts:
      zoo1: 171.168.1.1
      zoo2: 171.168.1.2
      zoo3: 171.168.1.3
networks:
  kafka:
    external:
      name: mykafka-subnet
Start the containers
$ docker-compose -f docker-compose-kafka.yml up -d
Verify
$ ./kafka-topics.sh --create --zookeeper localhost:2184 --replication-factor 1 --partitions 1 --topic mytest
Created topic mytest.
$ ./kafka-topics.sh --zookeeper 127.0.0.1:2185 --list
mytest
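To confirm the three brokers really form one cluster, you can also create a replicated topic and describe it. The topic name replica-test is arbitrary; the describe output should show leaders and ISRs spread across broker ids 1, 2 and 3.
$ docker exec kafka1 /kafka_2.12-2.8.1/bin/kafka-topics.sh --create --zookeeper zoo1:2181 --replication-factor 3 --partitions 3 --topic replica-test
$ docker exec kafka1 /kafka_2.12-2.8.1/bin/kafka-topics.sh --describe --zookeeper zoo1:2181 --topic replica-test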
2. Deploying with the official images
Official guide: https://developer.confluent.io/quickstart/kafka-docker/
- Configuration: docker-compose.yml
---
version: '3'
services:
  zookeeper:
    image: confluentinc/cp-zookeeper:7.3.0
    container_name: zookeeper
    ports:
      - "2181:2181"
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181
      ZOOKEEPER_TICK_TIME: 2000
  broker:
    image: confluentinc/cp-kafka:7.3.0
    container_name: broker
    ports:
      # To learn about configuring Kafka for access across networks see
      # https://www.confluent.io/blog/kafka-client-cannot-connect-to-broker-on-aws-on-docker-etc/
      - "9092:9092"
    depends_on:
      - zookeeper
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: PLAINTEXT:PLAINTEXT,PLAINTEXT_INTERNAL:PLAINTEXT
      KAFKA_ADVERTISED_LISTENERS: PLAINTEXT://localhost:9092,PLAINTEXT_INTERNAL://broker:29092
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_TRANSACTION_STATE_LOG_MIN_ISR: 1
      KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR: 1
- Start the containers:
$ docker-compose up -d
- Create a topic
$ docker exec broker \
kafka-topics --bootstrap-server broker:9092 \
--create \
--topic quickstart
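- Produce and consume a few messages (following the same Confluent quickstart; the console clients in the cp-kafka image are on the PATH without the .sh suffix). Type lines into the producer and stop it with Ctrl-D, then stop the consumer with Ctrl-C:
$ docker exec -it broker \
  kafka-console-producer --bootstrap-server broker:9092 --topic quickstart
$ docker exec -it broker \
  kafka-console-consumer --bootstrap-server broker:9092 --topic quickstart --from-beginning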