# Kafka cluster setup -- 3 machines
# Check whether a JDK is already installed
rpm -qa | grep jdk
# If one is present, remove it first (replace XXX with the package name
# printed by the previous command)
yum -y remove XXX
# Install the JDK
rpm -ivh jdk-8u431-linux-x64.rpm
# Configure environment variables: append the following lines to ~/.bashrc
vim ~/.bashrc
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
source ~/.bashrc
# Install the NTP time-sync tool (the cluster nodes must agree on time)
yum install ntp -y
# Sync the system clock against a public NTP server
ntpdate ntp1.aliyun.com
# Persist system time to the hardware clock
# NOTE(review): 'clock' is legacy; on newer systems use 'hwclock -w'
clock -w
# --- ZooKeeper 3.4.6 setup (run on all three machines) ---
cd /usr/local
tar -zxf zookeeper-3.4.6.tar.gz -C ./
cd zookeeper-3.4.6
cp conf/zoo_sample.cfg conf/zoo.cfg
# Fix: the file copied above lives under conf/, so edit conf/zoo.cfg
# (the original 'vim zoo.cfg' would open a new empty file in the cwd)
vim conf/zoo.cfg
# Modify:
dataDir=/usr/local/zookeeper-3.4.6/zkdata
# Add:
server.1=CentOS7_Master_Kafka_002:2888:3888
server.2=CentOS7_Master_Kafka_003:2888:3888
server.3=CentOS7_Master_Kafka_004:2888:3888
# Save and quit.
# Fix: create the data directory declared in dataDir above -- it is never
# created otherwise, and the myid redirection below would fail without it
mkdir -p /usr/local/zookeeper-3.4.6/zkdata
# Write the node id -- run exactly ONE of the following per machine,
# matching the server.N entry for that host:
echo 1 > /usr/local/zookeeper-3.4.6/zkdata/myid
echo 2 > /usr/local/zookeeper-3.4.6/zkdata/myid
echo 3 > /usr/local/zookeeper-3.4.6/zkdata/myid
# Start ZooKeeper on the three machines in order, then check status
cd /usr/local/zookeeper-3.4.6
./bin/zkServer.sh start conf/zoo.cfg
./bin/zkServer.sh status
# --- Kafka 2.2.0 setup (run on all three machines) ---
cd /usr/local
tar -zxf kafka_2.11-2.2.0.tgz -C ./
cd kafka_2.11-2.2.0
vim config/server.properties
# Machine 1 (broker.id keeps its default of 0). Modify:
listeners=PLAINTEXT://CentOS7_Master_Kafka_002:9092
# Modify (create this directory yourself if it does not exist):
log.dirs=/usr/local/kafka_2.11-2.2.0/kafka-logs
# Modify:
zookeeper.connect=CentOS7_Master_Kafka_002:2181,CentOS7_Master_Kafka_003:2181,CentOS7_Master_Kafka_004:2181
# On machine 2:
cd /usr/local
cd kafka_2.11-2.2.0
vim config/server.properties
# Modify (must be unique per broker):
broker.id=1
# Modify:
listeners=PLAINTEXT://CentOS7_Master_Kafka_003:9092
# On machine 3:
cd /usr/local
cd kafka_2.11-2.2.0
vim config/server.properties
# Modify:
broker.id=2
# Modify:
listeners=PLAINTEXT://CentOS7_Master_Kafka_004:9092
# Start the brokers on the three machines in order
cd /usr/local/kafka_2.11-2.2.0
./bin/kafka-server-start.sh -daemon config/server.properties
# Verify with jps: both Kafka and QuorumPeerMain should be running, e.g.:
# [root@CentOS7_Master_Kafka_002 local]# jps
# 4596 Kafka
# 3590 QuorumPeerMain
# 5789 Jps
# --- Kafka-Eagle (EFAK) monitoring: install on ONE server only ---
cd /usr/local
tar -zxf kafka-eagle-bin-3.0.1.tar.gz
mv kafka-eagle-bin-3.0.1 kafka-eagle
# Configure environment variables: append the following to ~/.bashrc
vi ~/.bashrc
# Fix: the archive was extracted and renamed under /usr/local above,
# so KE_HOME must be /usr/local/kafka-eagle (was /usr/kafka-eagle)
KE_HOME=/usr/local/kafka-eagle
JAVA_HOME=/usr/java/latest
PATH=$PATH:$JAVA_HOME/bin:$KE_HOME/bin
CLASSPATH=.
export JAVA_HOME
export PATH
export CLASSPATH
export KE_HOME
source ~/.bashrc
# Enable JMX on the brokers -- modify this file on ALL THREE machines
vim /usr/local/kafka_2.11-2.2.0/bin/kafka-server-start.sh
# Find the following section:
if [ "x$KAFKA_HEAP_OPTS" = "x" ]; then
# NOTE(review): -XX:PermSize is ignored on JDK 8 (PermGen was removed) -- harmless but noisy
export KAFKA_HEAP_OPTS="-server -Xms2G -Xmx2G -XX:PermSize=128m -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -XX:ParallelGCThreads=8 -XX:ConcGCThreads=5 -XX:InitiatingHeapOccupancyPercent=70"
export JMX_PORT="7788" # add this line so EFAK can read broker metrics over JMX
#export KAFKA_HEAP_OPTS="-Xmx1G -Xms1G"
fi
# --- Configure EFAK ---
cd /usr/local/kafka-eagle/
vi conf/system-config.properties
# Point EFAK at the ZooKeeper ensemble of cluster1:
efak.zk.cluster.alias=cluster1
cluster1.zk.list=CentOS7_Master_Kafka_002:2181,CentOS7_Master_Kafka_003:2181,CentOS7_Master_Kafka_004:2181
# Sample entries for additional clusters -- leave commented out:
#cluster2.zk.list=xdn10:2181,xdn11:2181,xdn12:2181
#cluster2.efak.sasl.enable=false
#cluster2.efak.sasl.protocol=SASL_PLAINTEXT
#cluster2.efak.sasl.mechanism=PLAIN
#cluster2.efak.sasl.jaas.config=org.apache.kafka.common.security.plain.PlainLoginModule required username="kafka" password="kafka-eagle";
#cluster2.efak.sasl.client.id=
#cluster2.efak.blacklist.topics=
#cluster2.efak.sasl.cgroup.enable=false
#cluster2.efak.sasl.cgroup.topics=
#cluster3.efak.ssl.enable=false
#cluster3.efak.ssl.protocol=SSL
#cluster3.efak.ssl.truststore.location=
#cluster3.efak.ssl.truststore.password=
#cluster3.efak.ssl.keystore.location=
#cluster3.efak.ssl.keystore.password=
#cluster3.efak.ssl.key.password=
#cluster3.efak.ssl.endpoint.identification.algorithm=https
#cluster3.efak.blacklist.topics=
#cluster3.efak.ssl.cgroup.enable=false
#cluster3.efak.ssl.cgroup.topics=
# Database settings -- adjust to your own MySQL instance.
# NOTE(review): credentials are in plain text here; change the defaults.
efak.driver=com.mysql.cj.jdbc.Driver
efak.url=jdbc:mysql://127.0.0.1:3306/userke?useUnicode=true&characterEncoding=UTF-8&zeroDateTimeBehavior=convertToNull
efak.username=root
efak.password=123456
# Save and quit, then start EFAK
chmod u+x bin/ke.sh
./bin/ke.sh start
#启动成功会打印:::
已解压: media/css/public/fonts/boxicons.woff2
已解压: media/css/public/fonts/boxicons.svg
已解压: media/js/public/plus/all.min.js
[2025-01-07 18:27:02] INFO: Port Progress: [##################################################] | 100%
[2025-01-07 18:27:06] INFO: Config Progress: [##################################################] | 100%
[2025-01-07 18:27:09] INFO: Startup Progress: [##################################################] | 100%
[2025-01-07 18:26:59] INFO: Status Code[0]
[2025-01-07 18:26:59] INFO: [Job done!]
Welcome to
______ ______ ___ __ __
/ ____/ / ____/ / | / //_/
/ __/ / /_ / /| | / ,<
/ /___ / __/ / ___ | / /| |
/_____/ /_/ /_/ |_|/_/ |_|
( Eagle For Apache Kafka® )
Version v3.0.1 -- Copyright 2016-2022
*******************************************************************
* EFAK Service has started success.
* Welcome, Now you can visit 'http://192.168.42.139:8048' # 监控大屏登录地址
* Account:admin ,Password:123456
*******************************************************************
* <Usage> ke.sh [start|status|stop|restart|stats] </Usage>
* <Usage> https://www.kafka-eagle.org/ </Usage>
*******************************************************************
# Confirm EFAK started successfully
./bin/ke.sh status
# Expected output, e.g.:
# [2025-01-07 18:27:19] INFO : EFAK-192.168.42.139 is running, [2569] .
# After startup, verify that the configured MySQL server now has the 'userke' database
# --- Flume 1.11.0: avro source -> memory channel -> Kafka sink ---
cd /usr/local
wget https://dlcdn.apache.org/flume/1.11.0/apache-flume-1.11.0-bin.tar.gz
tar -zxf apache-flume-1.11.0-bin.tar.gz
# Create the Flume agent config for Kafka
cd apache-flume-1.11.0-bin
# Fix: filename typo corrected (was 'kafka.propertis'); the same name is
# used by the flume-ng command further below
vim conf/kafka.properties
# File contents:
a1.sources = r1
a1.sinks = k1
a1.channels = c1
a1.sources.r1.type = avro
a1.sources.r1.bind = CentOS7_Master_Kafka_001
a1.sources.r1.port = 44444
a1.channels.c1.type = memory
a1.channels.c1.capacity = 10000
a1.channels.c1.transactionCapacity = 10000
a1.channels.c1.byteCapacityBufferPercentage = 20
a1.channels.c1.byteCapacity = 800000
a1.sinks.k1.type = org.apache.flume.sink.kafka.KafkaSink
a1.sinks.k1.kafka.topic = topic01
a1.sinks.k1.kafka.bootstrap.servers = CentOS7_Master_Kafka_001:9092
a1.sinks.k1.kafka.flumeBatchSize = 20
a1.sinks.k1.kafka.producer.acks = -1
a1.sinks.k1.kafka.producer.linger.ms = 1
a1.sinks.k1.kafka.producer.compression.type = snappy
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# NOTE(review): the brokers configured earlier listen on hosts _002/_003/_004,
# but this section uses CentOS7_Master_Kafka_001 -- confirm a broker actually
# runs on _001, otherwise point bootstrap.servers and the commands below at _002.
# Make sure ZooKeeper and Kafka are running
cd /usr/local/kafka_2.11-2.2.0
# Create the topic
./bin/kafka-topics.sh --bootstrap-server CentOS7_Master_Kafka_001:9092 --create --topic topic01 --partitions 3 --replication-factor 1
# Start a console consumer
./bin/kafka-console-consumer.sh --bootstrap-server CentOS7_Master_Kafka_001:9092 --topic topic01 --group group1
# Start the Flume agent
cd /usr/local/apache-flume-1.11.0-bin
# This command may print nothing after starting
./bin/flume-ng agent -c conf/ -n a1 -f conf/kafka.properties -Dflume.root.logger=INFO,console
# In a new window, watch the Flume log
tail -f flume.log
# In another new window, send test data through the avro source
cd /usr/local/apache-flume-1.11.0-bin
./bin/flume-ng avro-client --host CentOS7_Master_Kafka_001 --port 44444 --filename ./flume.log
#正确结果为:刚才创建的kafka消费者会打印出flume.log的内容。
原创声明:本文系作者授权腾讯云开发者社区发表,未经许可,不得转载。
如有侵权,请联系 cloudcommunity@tencent.com 删除。