Kafka 4.0 Cluster Deployment
Kafka 4.0 is the latest Kafka release. It can be downloaded from the official Kafka website and requires JDK 17 or later.
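# Before unpacking, it is worth confirming the JDK on each node
# (assuming java is already on the PATH); the major version must be 17 or higher
java -version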
tar -xzf kafka_2.13-4.0.0.tgz
mv kafka_2.13-4.0.0 kafka
cd kafka
# Run this on any one node to generate a random UUID; every node must use this same UUID
bin/kafka-storage.sh random-uuid
# example output: IyyjPwZcTa2LHKkV1rj5pg
# On each node, set the following in config/server.properties
node.id=1    # unique per node (1, 2, 3), matching this node's id in controller.quorum.voters
controller.quorum.voters=1@192.168.10.10:9093,2@192.168.10.20:9093,3@192.168.10.30:9093
log.dirs=/opt/kafka/log/kraft-combined-logs
num.partitions=16
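# Putting these together, a minimal combined-mode server.properties for the first
# node (192.168.10.10) might look like the sketch below. process.roles and the
# listener lines are assumptions based on the stock KRaft config shipped with Kafka;
# only node.id and the listener addresses change from node to node.
process.roles=broker,controller
node.id=1
controller.quorum.voters=1@192.168.10.10:9093,2@192.168.10.20:9093,3@192.168.10.30:9093
# client listener on 9092, controller listener on 9093; use each node's own address
listeners=PLAINTEXT://192.168.10.10:9092,CONTROLLER://192.168.10.10:9093
advertised.listeners=PLAINTEXT://192.168.10.10:9092
controller.listener.names=CONTROLLER
inter.broker.listener.name=PLAINTEXT
listener.security.protocol.map=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT
log.dirs=/opt/kafka/log/kraft-combined-logs
num.partitions=16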
# Format the log directory on every node; the UUID passed with -t must be the one generated above, identical on every node
bin/kafka-storage.sh format -t IyyjPwZcTa2LHKkV1rj5pg -c config/server.properties
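# Equivalently, capture the UUID in a shell variable (the KAFKA_CLUSTER_ID name is
# just a convention) and reuse it on every node
KAFKA_CLUSTER_ID=IyyjPwZcTa2LHKkV1rj5pg
bin/kafka-storage.sh format -t $KAFKA_CLUSTER_ID -c config/server.properties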
# Start the Kafka service on every node
bin/kafka-server-start.sh -daemon /opt/kafka/config/server.properties
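# Once all three nodes are up, you can sanity-check the KRaft quorum from any node;
# kafka-metadata-quorum.sh ships with Kafka and reports the current leader and voters
bin/kafka-metadata-quorum.sh --bootstrap-server 192.168.10.10:9092 describe --status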
# Test the Kafka cluster
# Create a topic named cluster-topic1 to store events
bin/kafka-topics.sh --bootstrap-server 192.168.10.10:9092 --create --topic cluster-topic1 --partitions 3 --replication-factor 3
# Describe the topic from another node (port 9092)
bin/kafka-topics.sh --describe --topic cluster-topic1 --bootstrap-server 192.168.10.20:9092
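# The same tool can also list every topic in the cluster, from any broker
bin/kafka-topics.sh --list --bootstrap-server 192.168.10.30:9092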
# Write some events into the topic
bin/kafka-console-producer.sh --topic cluster-topic1 --bootstrap-server 192.168.10.10:9092
This is my first event
This is my second event
# Read the events back
bin/kafka-console-consumer.sh --topic cluster-topic1 --from-beginning --bootstrap-server 192.168.10.10:9092
# Delete the topic
bin/kafka-topics.sh --bootstrap-server 192.168.10.10:9092 --delete --topic cluster-topic1
# Configure a systemd service
# Stop the Kafka service first
/opt/kafka/bin/kafka-server-stop.sh
# Create the service unit, then reload systemd and start the service
cat > /etc/systemd/system/kafka.service << EOF
[Unit]
Description=Apache Kafka Service (KRaft Mode)
After=network.target

[Service]
Type=simple
User=root
Group=root
ExecStart=/opt/kafka/bin/kafka-server-start.sh /opt/kafka/config/server.properties
ExecStop=/opt/kafka/bin/kafka-server-stop.sh
Restart=on-abnormal
WorkingDirectory=/opt/kafka
Environment="JAVA_HOME=/usr/local/jdk-17.0.12"

[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl start kafka
systemctl status kafka
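# Since the unit is wanted by multi-user.target, you will likely also want Kafka
# to start automatically at boot; run this on each node
systemctl enable kafka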