|NO.Z.00010|——————————|Deployment|——|Hadoop&OLAP Database Management System.v10|——|Kylin.v01|zookeeper&kafka|Cluster Setup|


1. Cluster setup: node layout
### --- Allocate three Linux hosts for a three-node Kafka cluster.

Hadoop01    192.168.1.121
Hadoop02    192.168.1.122
Hadoop03    192.168.1.123

2. Deploy the Kafka cluster: configure the hosts file
### --- /etc/hosts configuration on all three hosts:

[root@hadoop01 ~]# vim /etc/hosts
192.168.1.121 hadoop01
192.168.1.122 hadoop02
192.168.1.123 hadoop03
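~~~     # Optional quick check that the names resolve on every host, for example from hadoop01:
[root@hadoop01 ~]# ping -c 1 hadoop02
[root@hadoop01 ~]# ping -c 1 hadoop03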
3. Install the JDK: Java is a prerequisite for installing and configuring Kafka
### --- Check for the bundled OpenJDK

~~~     # Check the JDK packages bundled with the system
[root@hadoop01 ~]# rpm -qa | grep java
~~~     # If a bundled OpenJDK is present, remove it
[root@hadoop01 ~]# rpm -e java-1.6.0-openjdk-1.6.0.41-1.13.13.1.el6_8.x86_64 tzdata-java-2016j-1.el6.noarch java-1.7.0-openjdk-1.7.0.131-2.6.9.0.el6_8.x86_64 --nodeps
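~~~     # Re-run the query to confirm the bundled JDK packages are gone
[root@hadoop01 ~]# rpm -qa | grep java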
### --- Install the JDK

~~~     # Installation directory for all software
[root@hadoop01 ~]# mkdir -p /opt/yanqi/servers
~~~     # Directory for all software archives
[root@hadoop01 ~]# mkdir -p /opt/yanqi/software
[root@hadoop01 ~]# cd /opt/yanqi/software/
~~~     # Upload the JDK archive to /opt/yanqi/software and extract it
[root@hadoop01 software]# tar -zxvf jdk-8u231-linux-x64.tar.gz -C ../servers/
### --- Configure environment variables

~~~     # Configure environment variables
[root@hadoop01 ~]# vim /etc/profile
#JAVA_HOME
export JAVA_HOME=/opt/yanqi/servers/jdk1.8.0_231
export PATH=$JAVA_HOME/bin:$PATH
~~~     # After editing, run source /etc/profile to apply the changes
[root@hadoop01 ~]# source /etc/profile 
### --- Check the JDK version

[root@hadoop01 ~]# java -version
java version "1.8.0_231"
Java(TM) SE Runtime Environment (build 1.8.0_231-b11)
Java HotSpot(TM) 64-Bit Server VM (build 25.231-b11, mixed mode)
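~~~     # The same JDK is needed on hadoop02 and hadoop03 as well; one way is to copy the
~~~     # installation across (assuming /opt/yanqi/servers already exists on those hosts)
~~~     # and then repeat the /etc/profile changes there
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/jdk1.8.0_231 hadoop02:/opt/yanqi/servers/
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/jdk1.8.0_231 hadoop03:/opt/yanqi/servers/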
4. ZooKeeper cluster setup
### --- Install ZooKeeper on all three Linux hosts to form the ZooKeeper cluster

~~~     Upload zookeeper-3.4.14.tar.gz to each host, then extract and configure ZooKeeper
### --- Deploy ZooKeeper on hadoop01
[root@hadoop01 ~]# cd /opt/yanqi/software/
 
~~~     # Extract ZooKeeper
[root@hadoop01 software]# tar -zxf zookeeper-3.4.14.tar.gz -C ../servers/
~~~     # Copy zoo_sample.cfg to zoo.cfg

[root@hadoop01 ~]# cp /opt/yanqi/servers/zookeeper-3.4.14/conf/zoo_sample.cfg /opt/yanqi/servers/zookeeper-3.4.14/conf/zoo.cfg
~~~     # Create the data directory and write this node's myid (1 on hadoop01)

[root@hadoop01 ~]# mkdir -p /opt/yanqi/servers/zookeeper-3.4.14/data
[root@hadoop01 ~]# echo 1 > /opt/yanqi/servers/zookeeper-3.4.14/data/myid
~~~     # Edit zoo.cfg

[root@hadoop01 ~]# vim /opt/yanqi/servers/zookeeper-3.4.14/conf/zoo.cfg 
 
dataDir=/opt/yanqi/servers/zookeeper-3.4.14/data

server.1=hadoop01:2888:3888
server.2=hadoop02:2888:3888
server.3=hadoop03:2888:3888
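~~~     # For reference, after these edits a minimal zoo.cfg typically looks as follows;
~~~     # tickTime, initLimit, syncLimit and clientPort keep the zoo_sample.cfg defaults
tickTime=2000
initLimit=10
syncLimit=5
clientPort=2181
dataDir=/opt/yanqi/servers/zookeeper-3.4.14/data
server.1=hadoop01:2888:3888
server.2=hadoop02:2888:3888
server.3=hadoop03:2888:3888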
~~~     # Configure environment variables
[root@hadoop01 ~]# vim /etc/profile
#ZOOKEEPER_HOME
export ZOOKEEPER_PREFIX=/opt/yanqi/servers/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_PREFIX/bin
export ZOO_LOG_DIR=/opt/yanqi/servers/zookeeper-3.4.14/log
 
~~~     # Exit vim and apply the configuration
[root@hadoop01 ~]# source /etc/profile
### --- Deploy ZooKeeper on hadoop02

~~~     # Copy /opt/yanqi/servers/zookeeper-3.4.14/ to hadoop02 and hadoop03, then set hadoop02's myid to 2
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/zookeeper-3.4.14/ hadoop02:/opt/yanqi/servers/
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/zookeeper-3.4.14/ hadoop03:/opt/yanqi/servers/
[root@hadoop02 ~]# mkdir -p /opt/yanqi/servers/zookeeper-3.4.14/data
[root@hadoop02 ~]# echo 2 > /opt/yanqi/servers/zookeeper-3.4.14/data/myid
~~~     # Configure environment variables
[root@hadoop02 ~]# vim /etc/profile
#ZOOKEEPER_PREFIX
export ZOOKEEPER_PREFIX=/opt/yanqi/servers/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_PREFIX/bin
export ZOO_LOG_DIR=/opt/yanqi/servers/zookeeper-3.4.14/log
 
~~~     # Exit vim and apply the configuration
[root@hadoop02 ~]# source /etc/profile
### --- Deploy ZooKeeper on hadoop03

[root@hadoop03 ~]# mkdir -p /opt/yanqi/servers/zookeeper-3.4.14/data
[root@hadoop03 ~]# echo 3 > /opt/yanqi/servers/zookeeper-3.4.14/data/myid
~~~     # Configure environment variables
[root@hadoop03 ~]# vim /etc/profile
#ZOOKEEPER_PREFIX
export ZOOKEEPER_PREFIX=/opt/yanqi/servers/zookeeper-3.4.14
export PATH=$PATH:$ZOOKEEPER_PREFIX/bin
export ZOO_LOG_DIR=/opt/yanqi/servers/zookeeper-3.4.14/log
 
~~~     # Exit vim and apply the configuration
[root@hadoop03 ~]# source /etc/profile
### --- Start ZooKeeper

~~~     # Start ZooKeeper on all three hosts
[root@hadoop01 ~]# zkServer.sh start
[root@hadoop02 ~]# zkServer.sh start
[root@hadoop03 ~]# zkServer.sh start
~~~     # Check the ZooKeeper status on all three hosts

[root@hadoop01 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: follower
[root@hadoop02 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: follower
[root@hadoop03 ~]# zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /opt/yanqi/servers/zookeeper-3.4.14/bin/../conf/zoo.cfg
Mode: leader
### --- Write a ZooKeeper control script (zk.sh)

[root@hadoop01 ~]# vim zk.sh 
#!/bin/bash
# Run the given zkServer.sh action (start|stop|status|restart) on every node

if (( $# == 0 )); then
    echo "usage: $0 {start|stop|status|restart}"
    exit 1
fi

hosts="hadoop01 hadoop02 hadoop03"

for host in $hosts
do
    echo "==== zookeeper $1 on $host ===="
    ssh $host "source /etc/profile; /opt/yanqi/servers/zookeeper-3.4.14/bin/zkServer.sh $1"
done
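~~~     # Make the script executable and use it to control all three nodes at once;
~~~     # this assumes hadoop01 can ssh to every node (key-based login is most convenient)
[root@hadoop01 ~]# chmod +x zk.sh
[root@hadoop01 ~]# ./zk.sh status
[root@hadoop01 ~]# ./zk.sh stop
[root@hadoop01 ~]# ./zk.sh start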
5. Kafka cluster setup
### --- Install Kafka: upload the archive and extract it to /opt/yanqi/servers

~~~     # Deploy Kafka on the hadoop01 node
[root@hadoop01 ~]# cd /opt/yanqi/software/
[root@hadoop01 software]# tar -zxvf kafka_2.12-1.0.2.tgz -C /opt/yanqi/servers/
[root@hadoop01 ~]# mv /opt/yanqi/servers/kafka_2.12-1.0.2/ /opt/yanqi/servers/kafka_2.12
~~~     # Copy to the other nodes

~~~     # Copy to hadoop02 and hadoop03
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/kafka_2.12/ hadoop02:/opt/yanqi/servers/
[root@hadoop01 ~]# scp -r /opt/yanqi/servers/kafka_2.12/ hadoop03:/opt/yanqi/servers/
~~~     # Configure environment variables

~~~     # Configure the environment variables on all three hosts
[root@hadoop01 ~]# vim /etc/profile
##KAFKA_HOME
export KAFKA_HOME=/opt/yanqi/servers/kafka_2.12
export PATH=$PATH:$KAFKA_HOME/bin
~~~     # Apply the configuration

[root@hadoop01 ~]# source /etc/profile
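~~~     # Quick sanity check that the variable took effect (repeat on hadoop02 and hadoop03)
[root@hadoop01 ~]# echo $KAFKA_HOME
/opt/yanqi/servers/kafka_2.12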
### --- Configure Kafka

~~~     # hadoop01 configuration
[root@hadoop01 ~]# vim /opt/yanqi/servers/kafka_2.12/config/server.properties
broker.id=0
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://hadoop01:9092
log.dirs=/opt/yanqi/servers/kafka_2.12/kafka-logs
zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181/myKafka
~~~     # Leave the remaining settings at their defaults
~~~     # hadoop02 configuration

[root@hadoop02 ~]# vim /opt/yanqi/servers/kafka_2.12/config/server.properties
broker.id=1
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://hadoop02:9092
log.dirs=/opt/yanqi/servers/kafka_2.12/kafka-logs
zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181/myKafka
~~~     # Leave the remaining settings at their defaults
~~~     # hadoop03 configuration

[root@hadoop03 ~]# vim /opt/yanqi/servers/kafka_2.12/config/server.properties
broker.id=2
listeners=PLAINTEXT://:9092
advertised.listeners=PLAINTEXT://hadoop03:9092
log.dirs=/opt/yanqi/servers/kafka_2.12/kafka-logs
zookeeper.connect=hadoop01:2181,hadoop02:2181,hadoop03:2181/myKafka
~~~     # Leave the remaining settings at their defaults
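~~~     # Optional sanity check: only broker.id and advertised.listeners should differ
~~~     # across the three nodes
[root@hadoop01 ~]# for h in hadoop01 hadoop02 hadoop03; do ssh $h "grep -E '^(broker.id|advertised.listeners)=' /opt/yanqi/servers/kafka_2.12/config/server.properties"; done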
6. Start the Kafka cluster and verify it
### --- Start Kafka

~~~     The Cluster Id is a unique, immutable identifier for a Kafka cluster.
~~~     It consists of at most 22 characters from the URL-safe Base64 alphabet.
~~~     In Kafka 0.10.1 and later, when the cluster starts for the first time the broker
~~~     reads the id from ZooKeeper's /cluster/id node; if it does not exist, a new one is generated.
[root@hadoop01 ~]# kafka-server-start.sh /opt/yanqi/servers/kafka_2.12/config/server.properties
~~~     # Cluster Id reported on hadoop01:
INFO Cluster ID = 5CZTD5JMTpuw9mjgpTVimQ (kafka.server.KafkaServer)

~~~     # Cluster Id reported on hadoop02:
[root@hadoop02 ~]# kafka-server-start.sh /opt/yanqi/servers/kafka_2.12/config/server.properties
INFO Cluster ID = 5CZTD5JMTpuw9mjgpTVimQ (kafka.server.KafkaServer)

~~~     # Cluster Id reported on hadoop03:
[root@hadoop03 ~]# kafka-server-start.sh /opt/yanqi/servers/kafka_2.12/config/server.properties
INFO Cluster ID = 5CZTD5JMTpuw9mjgpTVimQ (kafka.server.KafkaServer) 
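~~~     # Note: invoked as above, kafka-server-start.sh runs in the foreground and holds the
~~~     # terminal; to run the brokers in the background the script also accepts -daemon (run on each node)
[root@hadoop01 ~]# kafka-server-start.sh -daemon /opt/yanqi/servers/kafka_2.12/config/server.properties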
### --- Verify the Kafka cluster
~~~     controller_epoch records how many times the cluster's controller has been elected.
~~~     If the current controller node fails, a new controller is elected and the value
~~~     becomes 2; after each further election it increases by one.

~~~     # 1 is the current controller epoch of the cluster
[root@hadoop01 ~]# zkCli.sh
[zk: localhost:2181(CONNECTED) 0] get /myKafka/controller_epoch
1
[zk: localhost:2181(CONNECTED) 6] get /myKafka/cluster/id
{"version":"1","id":"5CZTD5JMTpuw9mjgpTVimQ"}
[root@hadoop02 ~]# zkCli.sh
[zk: localhost:2181(CONNECTED) 6] get /myKafka/cluster/id
{"version":"1","id":"5CZTD5JMTpuw9mjgpTVimQ"}
[root@hadoop03 ~]# zkCli.sh
[zk: localhost:2181(CONNECTED) 6] get /myKafka/cluster/id
{"version":"1","id":"5CZTD5JMTpuw9mjgpTVimQ"}
~~~     # Log in with the ZooKeeper client and inspect the Kafka cluster metadata

[root@hadoop01 ~]# zkCli.sh 
[zk: localhost:2181(CONNECTED) 1] ls /myKafka/brokers/ids
[0, 1, 2]
~~~     # Broker details: hadoop01's registration in ZooKeeper:

[zk: localhost:2181(CONNECTED) 2] get /myKafka/brokers/ids/0
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://hadoop01:9092"],"jmx_port":-1,"host":"hadoop01","timestamp":"1632506889514","port":9092,"version":4}
cZxid = 0x100000026
~~~     # Broker details: hadoop02's registration in ZooKeeper:

[zk: localhost:2181(CONNECTED) 3] get /myKafka/brokers/ids/1 
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://hadoop02:9092"],"jmx_port":-1,"host":"hadoop02","timestamp":"1632506894168","port":9092,"version":4}
cZxid = 0x10000002f
~~~     # Broker details: hadoop03's registration in ZooKeeper:

[zk: localhost:2181(CONNECTED) 4] get /myKafka/brokers/ids/2
{"listener_security_protocol_map":{"PLAINTEXT":"PLAINTEXT"},"endpoints":["PLAINTEXT://hadoop03:9092"],"jmx_port":-1,"host":"hadoop03","timestamp":"1632506892547","port":9092,"version":4}
cZxid = 0x10000002b
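~~~     # As a final end-to-end check you can create and list a test topic
~~~     # (the topic name demo_topic below is only an example)
[root@hadoop01 ~]# kafka-topics.sh --zookeeper hadoop01:2181/myKafka --create --topic demo_topic --partitions 3 --replication-factor 3
[root@hadoop01 ~]# kafka-topics.sh --zookeeper hadoop01:2181/myKafka --list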

                 
 
