Hadoop Chapters 4-10 (Combined Notes)
Chapter 4: Hadoop Installation
Upload the Hadoop and JDK software packages to the nodes.
1. Configure network settings, the hostname, and the hostname-to-IP mappings (run on all nodes)
vi /etc/sysconfig/network-scripts/ifcfg-<NIC name>
TYPE=Ethernet
NAME=<NIC name>
DEVICE=<NIC name>
BOOTPROTO=static
ONBOOT=yes
IPADDR=<your IP address>
NETMASK=255.255.255.0
GATEWAY=<your gateway>
DNS1=114.114.114.114
# After saving the configuration above, run the following commands
ifdown <NIC name>;ifup <NIC name>
hostnamectl set-hostname <hostname>.example.com
bash
hostname
vi /etc/hosts
10.10.10.128 master master.example.com
10.10.10.129 slave1 slave1.example.com
10.10.10.130 slave2 slave2.example.com
# After saving the configuration above, run the following commands
ping master
ping slave1
ping slave2
2. Disable the firewall and SELinux (run on all nodes)
systemctl disable --now firewalld
setenforce 0
vi /etc/selinux/config
SELINUX=disabled
3. Install Hadoop (run on the master node)
tar xf jdk-8u152-linux-x64.tar.gz -C /usr/local/src/
tar xf hadoop-2.7.1.tar.gz -C /usr/local/src/
cd /usr/local/src/
mv jdk1.8.0_152 jdk
mv hadoop-2.7.1 hadoop
vi /etc/profile.d/hadoop.sh
export JAVA_HOME=/usr/local/src/jdk
export HADOOP_HOME=/usr/local/src/hadoop
export PATH=${JAVA_HOME}/bin:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin:$PATH
# After saving the configuration above, run the following commands
source /etc/profile.d/hadoop.sh
echo $PATH
vi /usr/local/src/hadoop/etc/hadoop/hadoop-env.sh
export JAVA_HOME=/usr/local/src/jdk
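A quick sanity check that the environment file works: with the paths above in place, both tools should report their versions from /usr/local/src.
java -version
hadoop version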
4. Configure hdfs-site.xml parameters (on master)
vi /usr/local/src/hadoop/etc/hadoop/hdfs-site.xml
dfs.namenode.name.dir
file:/usr/local/src/hadoop/dfs/name
dfs.datanode.data.dir
file:/usr/local/src/hadoop/dfs/data
dfs.replication
2
# After saving the configuration above, run the following command
mkdir -p /usr/local/src/hadoop/dfs/{name,data}
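For reference, each name/value pair listed above is written into hdfs-site.xml as a <property> element inside <configuration>; the core-site.xml, mapred-site.xml, and yarn-site.xml files below follow the same layout. A sketch of the resulting hdfs-site.xml:
<configuration>
  <property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/usr/local/src/hadoop/dfs/name</value>
  </property>
  <property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/usr/local/src/hadoop/dfs/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>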
5. Configure core-site.xml parameters (on master)
vi /usr/local/src/hadoop/etc/hadoop/core-site.xml
fs.defaultFS
hdfs://master:9000
io.file.buffer.size
131072
hadoop.tmp.dir
file:/usr/local/src/hadoop/tmp
# After saving the configuration above, run the following command
mkdir -p /usr/local/src/hadoop/tmp
6. Configure mapred-site.xml parameters (on master)
cd /usr/local/src/hadoop/etc/hadoop
cp mapred-site.xml.template mapred-site.xml
vi /usr/local/src/hadoop/etc/hadoop/mapred-site.xml
mapreduce.framework.name
yarn
mapreduce.jobhistory.address
master:10020
mapreduce.jobhistory.webapp.address
master:19888
7. Configure yarn-site.xml parameters (on master)
vi /usr/local/src/hadoop/etc/hadoop/yarn-site.xml
yarn.resourcemanager.address
master:8032
yarn.resourcemanager.scheduler.address
master:8030
yarn.resourcemanager.webapp.address
master:8088
yarn.resourcemanager.resource-tracker.address
master:8031
yarn.resourcemanager.admin.address
master:8033
yarn.nodemanager.aux-services
mapreduce_shuffle
yarn.nodemanager.aux-services.mapreduce_shuffle.class
org.apache.hadoop.mapred.ShuffleHandler
8. Other Hadoop-related configuration
# Run the following commands on master
vi /usr/local/src/hadoop/etc/hadoop/masters
10.10.10.128
# After saving, run the following command
vi /usr/local/src/hadoop/etc/hadoop/slaves
10.10.10.129
10.10.10.130
# After saving, run the following commands
useradd hadoop
echo 'hadoop' | passwd --stdin hadoop
chown -R hadoop.hadoop /usr/local/src
ll /usr/local/src/
# Configure passwordless SSH login from master to all slave nodes
ssh-keygen -t rsa
ssh-copy-id root@slave1
ssh-copy-id root@slave2
# Sync everything under /usr/local/src/ to all slave nodes
scp -r /usr/local/src/* root@slave1:/usr/local/src/
scp -r /usr/local/src/* root@slave2:/usr/local/src/
scp /etc/profile.d/hadoop.sh root@slave1:/etc/profile.d/
scp /etc/profile.d/hadoop.sh root@slave2:/etc/profile.d/
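To confirm the passwordless login configured above works, a quick check from master should print the remote hostnames without asking for a password:
ssh root@slave1 hostname
ssh root@slave2 hostname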
# Run the following commands on all slave nodes
useradd hadoop
echo 'hadoop' | passwd --stdin hadoop
chown -R hadoop.hadoop /usr/local/src
ll /usr/local/src/
source /etc/profile.d/hadoop.sh
echo $PATH
Chapter 5: Running the Hadoop Cluster
# On master
su - hadoop
cd /usr/local/src/hadoop/
./bin/hdfs namenode -format
hadoop-daemon.sh start namenode
hadoop-daemon.sh start secondarynamenode
jps
# You should see NameNode and SecondaryNameNode
# On slave1
su - hadoop
hadoop-daemon.sh start datanode
jps
# On slave2
su - hadoop
hadoop-daemon.sh start datanode
jps
# You should see DataNode
# On master
su - hadoop
hdfs dfsadmin -report
ssh-keygen -t rsa
ssh-copy-id slave1
ssh-copy-id slave2
ssh-copy-id master
stop-dfs.sh
start-dfs.sh
start-yarn.sh
jps
# master should show ResourceManager; the slaves should show NodeManager
hdfs dfs -mkdir /input
hdfs dfs -ls /
mkdir ~/input
vi ~/input/data.txt
Hello World
Hello Hadoop
Hello Huasan
hdfs dfs -put ~/input/data.txt /input
hdfs dfs -cat /input/data.txt
hadoop jar /usr/local/src/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.1.jar wordcount /input/data.txt /output
# When the job finishes you should see map 100%, reduce 100%, and a 'completed successfully' message
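Once the job succeeds, the counts are written under /output; with the single-reducer run above the result typically lands in part-r-00000:
hdfs dfs -ls /output
hdfs dfs -cat /output/part-r-00000
# Expected counts for the sample data.txt:
# Hadoop  1
# Hello   3
# Huasan  1
# World   1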
Chapter 6: Hive Component
1. Check whether the processes are running
Run on all nodes:
su - hadoop
jps
# master should show the NameNode, SecondaryNameNode, and ResourceManager processes; slave1 and slave2 should show DataNode and NodeManager
If the processes are not running, run the following command:
start-all.sh
2. Remove the MariaDB database
[root@master ~]# rpm -qa | grep mariadb
mariadb-libs-5.5.52-2.el7.x86_64
[root@master ~]# rpm -e --nodeps mariadb-libs-5.5.52-2.el7.x86_64
3. Deploy MySQL
Deploy on the master host
# Install unzip
yum -y install unzip
# Enter the software directory
cd software/
# Unzip the MySQL archive
unzip mysql-5.7.18.zip
# Enter the MySQL directory
cd mysql-5.7.18
# Install MySQL
yum -y install *.rpm
# Edit the database configuration
vi /etc/my.cnf
default-storage-engine=innodb
innodb_file_per_table
collation-server=utf8_general_ci
init-connect='SET NAMES utf8'
character-set-server=utf8
# Enable the service and start it now
systemctl enable --now mysqld
# Look up the temporary default password
cat /var/log/mysqld.log|grep password
# Initialize (secure the installation)
mysql_secure_installation
Enter the temporary password found in /var/log/mysqld.log
y
Password123!
Password123!
y
y
n
y
y
# Log in to the database
mysql -uroot -p'Password123!'
# Grant the root user local access
mysql> grant all on *.* to 'root'@'localhost' identified by 'Password123!';
# Grant the root user remote access
mysql> grant all on *.* to 'root'@'%' identified by 'Password123!';
# Flush privileges so the grants take effect
mysql> flush privileges;
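Optionally, the grants created above can be double-checked from the same session before exiting:
mysql> show grants for 'root'@'localhost';
mysql> show grants for 'root'@'%';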
# Exit
mysql> quit
4. Test MySQL
Deploy on slave1
# Install the MariaDB client
yum -y install mariadb
# Test the remote connection to master
mysql -uroot -p'Password123!' -h10.10.10.128
5. Install the Hive component
Deploy on the master host
# Extract the Apache Hive archive to /usr/local/src/
tar xf software/apache-hive-2.0.0-bin.tar.gz -C /usr/local/src/
# Enter /usr/local/src
cd /usr/local/src/
# Rename to hive
mv apache-hive-2.0.0-bin/ hive
# Set the owner and group
chown -R hadoop.hadoop /usr/local/src/
# Create the hive.sh environment file
vi /etc/profile.d/hive.sh
export HIVE_HOME=/usr/local/src/hive
export PATH=${HIVE_HOME}/bin:$PATH
# Apply it
source /etc/profile.d/hive.sh
# Verify
echo $PATH
# Switch to the hadoop user
su - hadoop
# Enter /usr/local/src/hive/conf/
cd /usr/local/src/hive/conf/
# Copy the template
cp hive-default.xml.template hive-site.xml
# Edit the configuration file
vi hive-site.xml
# Configure the database connection
javax.jdo.option.ConnectionURL
jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&useSSL=false
JDBC connect string for a JDBC metastore
# Configure the root password
javax.jdo.option.ConnectionPassword
Password123!
password to use against metastore database
# Configure metastore schema version verification; if it already defaults to false, leave it unchanged
hive.metastore.schema.verification
false
Enforce metastore schema version consistency.
True: Verify that version information stored in metastore matches with one from Hive jars. Also disable automatic schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures proper metastore schema migration. (Default)
False: Warn if the version information stored in metastore doesn't match with one from in Hive jars.
# Configure the JDBC driver
javax.jdo.option.ConnectionDriverName
com.mysql.jdbc.Driver
Driver class name for a JDBC metastore
# Configure the database username as root
javax.jdo.option.ConnectionUserName
root
Username to use against metastore database
# Replace the values of the following properties with local paths
hive.querylog.location
/usr/local/src/hive/tmp
Location of Hive run time structured log file
hive.exec.local.scratchdir
/usr/local/src/hive/tmp
Local scratch space for Hive jobs
hive.downloaded.resources.dir
/usr/local/src/hive/tmp/resources
Temporary local directory for added resources in the remote file system.
hive.server2.logging.operation.log.location
/usr/local/src/hive/tmp/operation_logs
Top level directory where operation logs are stored if logging functionality is enabled
# Create the directories
mkdir -p /usr/local/src/hive/tmp/{resources,operation_logs}
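In the actual hive-site.xml each pair above sits inside a <property> element, and the & in the JDBC URL must be escaped as &amp; inside XML. A sketch of the connection property:
<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://master:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>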
# Copy the MySQL JDBC driver into Hive's lib directory
cp software/mysql-connector-java-5.1.46.jar /usr/local/src/hive/lib/
# Stop all processes, then confirm with jps that nothing is running
stop-all.sh
# Start everything again
start-all.sh
schematool -initSchema -dbType mysql
# Seeing 'schemaTool completed' means the initialization succeeded
mysql -uroot -p'Password123!' -e 'show databases;'
hive
hive>
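A minimal smoke test inside the Hive CLI is to list the databases (at this point only default should appear) and then quit:
hive> show databases;
hive> quit;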
Chapter 7: ZooKeeper Installation
1. Configure time synchronization
# Open the configuration file
[root@master ~]# vi /etc/chrony.conf
pool time1.aliyun.com iburst
2. Deploy ZooKeeper
Configuration on master
# Extract the archive
[root@master conf]# tar xf zookeeper-3.4.8.tar.gz -C /usr/local/src/
# Enter the directory
[root@master ~]# cd /usr/local/src/
# Rename the directory to zookeeper
[root@master src]# mv zookeeper-3.4.8/ zookeeper
# Enter the zookeeper directory
[root@master src]# cd /usr/local/src/zookeeper/
# Create the data and logs directories
[root@master zookeeper]# mkdir data logs
# Write this node's myid
[root@master zookeeper]# echo '1' > /usr/local/src/zookeeper/data/myid
# Enter the conf directory
[root@master zookeeper]# cd /usr/local/src/zookeeper/conf
# Copy the sample configuration
[root@master conf]# cp zoo_sample.cfg zoo.cfg
[root@master conf]# vi zoo.cfg
# Modify the dataDir parameter as follows:
dataDir=/usr/local/src/zookeeper/data
# Append the following to the end of zoo.cfg; these entries define the ports used by the three ZooKeeper nodes
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
[root@master conf]# vi /etc/profile.d/zookeeper.sh
# Append to the end of the file
export ZOOKEEPER_HOME=/usr/local/src/zookeeper    # ZooKeeper installation directory
export PATH=${ZOOKEEPER_HOME}/bin:$PATH    # ZooKeeper executables directory
# Change ownership
[root@master conf]# chown -R hadoop.hadoop /usr/local/src/
# Copy to the slave nodes
[root@master conf]# scp -r /usr/local/src/zookeeper/ slave1:/usr/local/src/
[root@master conf]# scp -r /usr/local/src/zookeeper/ slave2:/usr/local/src/
[root@master conf]# scp /etc/profile.d/zookeeper.sh slave1:/etc/profile.d/
[root@master conf]# scp /etc/profile.d/zookeeper.sh slave2:/etc/profile.d/
Configuration on the slaves
# Change ownership
[root@slave1 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave1 ~]# ll /usr/local/src/
# Change ownership
[root@slave2 ~]# chown -R hadoop.hadoop /usr/local/src/
[root@slave2 ~]# ll /usr/local/src/
# Write each node's myid
[root@slave1 ~]# echo '2' > /usr/local/src/zookeeper/data/myid
[root@slave2 ~]# echo '3' > /usr/local/src/zookeeper/data/myid
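Before starting the cluster, it is worth confirming that each node's myid matches its server.N line in zoo.cfg (1 on master, 2 on slave1, 3 on slave2):
cat /usr/local/src/zookeeper/data/myid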
3. Start ZooKeeper
# Switch user
[root@master ~]# su - hadoop
Last login: Fri Apr 22 15:04:50 CST 2022 on pts/1
# Start ZooKeeper
[hadoop@master ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
# Check the processes
[hadoop@master ~]$ jps
2612 ResourceManager
3286 Jps
2235 NameNode
2444 SecondaryNameNode
3260 QuorumPeerMain
# Check the running status
[hadoop@master ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: leader
# Switch user
[root@slave1 ~]# su - hadoop
Last login: Fri Apr 22 15:04:19 CST 2022 on pts/1
# Start ZooKeeper
[hadoop@slave1 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
# Check the processes
[hadoop@slave1 ~]$ jps
1841 DataNode
2324 Jps
2300 QuorumPeerMain
1967 NodeManager
# Check the running status
[hadoop@slave1 ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
# Switch user
[root@slave2 ~]# su - hadoop
Last login: Fri Apr 22 15:04:37 CST 2022 on pts/1
# Start ZooKeeper
[hadoop@slave2 ~]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
# Check the processes
[hadoop@slave2 ~]$ jps
2258 QuorumPeerMain
1945 NodeManager
1819 DataNode
2284 Jps
# Check the running status
[hadoop@slave2 ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
Chapter 8: HBase Installation
1. Configure time synchronization
On master
# Install chrony
yum -y install chrony
# Edit the configuration file
vi /etc/chrony.conf
pool time1.aliyun.com iburst
# Enable the service and start it now
systemctl enable --now chronyd
# Check the service status
systemctl status chronyd
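To confirm the node is actually syncing against the configured pool, chrony can list its sources; the currently selected server is marked with an asterisk:
chronyc sources -v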
2. Deploy HBase
Configuration on master
# Extract the archive
tar xf software/hbase-1.2.1-bin.tar.gz -C /usr/local/src/
# Enter the directory
cd /usr/local/src/
# Rename to hbase
mv hbase-1.2.1 hbase
# Create the environment file
vi /etc/profile.d/hbase.sh
export HBASE_HOME=/usr/local/src/hbase
export PATH=${HBASE_HOME}/bin:$PATH
# Apply
source /etc/profile.d/hbase.sh
echo $PATH
vi /usr/local/src/hbase/conf/hbase-env.sh
export JAVA_HOME=/usr/local/src/jdk
export HBASE_MANAGES_ZK=true
export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop/
vi /usr/local/src/hbase/conf/hbase-site.xml
hbase.rootdir
hdfs://master:9000/hbase # use HDFS port 9000
The directory shared by region servers.
hbase.master.info.port
60010 # master web UI on port 60010
hbase.zookeeper.property.clientPort
2181 # ZooKeeper client port 2181
Property from ZooKeeper's config zoo.cfg. The port at which the clients will connect.
zookeeper.session.timeout
120000 # ZooKeeper session timeout (ms)
hbase.zookeeper.quorum
master,slave1,slave2 # ZooKeeper quorum nodes
hbase.tmp.dir
/usr/local/src/hbase/tmp # HBase temporary directory
hbase.cluster.distributed
true # run HBase in distributed (cluster) mode
mkdir -p /usr/local/src/hbase/tmp
vi /usr/local/src/hbase/conf/regionservers
10.10.10.129
10.10.10.130
scp -r /usr/local/src/hbase/ slave1:/usr/local/src/
scp -r /usr/local/src/hbase/ slave2:/usr/local/src/
scp /etc/profile.d/hbase.sh slave1:/etc/profile.d/
scp /etc/profile.d/hbase.sh slave2:/etc/profile.d/
chown -R hadoop.hadoop /usr/local/src/
ll /usr/local/src/
su - hadoop
Configuration on slave1
chown -R hadoop.hadoop /usr/local/src/
ll /usr/local/src/
su - hadoop
Configuration on slave2
chown -R hadoop.hadoop /usr/local/src/
ll /usr/local/src/
su - hadoop
3. Start HBase
start-all.sh
start-hbase.sh
# On the Windows client, add the following entry to C:\windows\system32\drivers\etc\hosts so the browser can resolve master
10.10.10.128 master
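With that hosts entry in place, the HBase master web UI configured earlier (hbase.master.info.port 60010) should be reachable from the Windows browser at:
http://master:60010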
4. HBase Shell Syntax
Step 1: Enter the HBase shell
[hadoop@master ~]$ hbase shell
Step 2: Create a table scores with two column families: grade and course
hbase(main):001:0> create 'scores','grade','course'
0 row(s) in 1.4480 seconds
=> Hbase::Table - scores
Step 3: Check the cluster status
hbase(main):001:0> status
1 active master, 0 backup masters, 2 servers, 0 dead, 1.0000 average load
Step 4: Check the database version
hbase(main):002:0> version
1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
Step 5: List tables
hbase(main):008:0> list
TABLE
scores
1 row(s) in 0.0100 seconds
=>["scores"]
Step 6: Insert record 1: jie, grade: 146cloud
hbase(main):003:0> put 'scores','jie','grade:','146cloud'
0 row(s) in 0.2250 seconds
Step 7: Insert record 2: jie, course:math, 86
hbase(main):004:0> put 'scores','jie','course:math','86'
0 row(s) in 0.0190 seconds
Step 8: Insert record 3: jie, course:cloud, 92
hbase(main):005:0> put 'scores','jie','course:cloud','92'
0 row(s) in 0.0170 seconds
Step 9: Insert record 4: shi, grade: 133soft
hbase(main):006:0> put 'scores','shi','grade:','133soft'
0 row(s) in 0.0070 seconds
Step 10: Insert record 5: shi, course:math, 87
hbase(main):007:0> put 'scores','shi','course:math','87'
0 row(s) in 0.0060 seconds
Step 11: Insert record 6: shi, course:cloud, 96
hbase(main):008:0> put 'scores','shi','course:cloud','96'
0 row(s) in 0.0070 seconds
Step 12: Read jie's records
hbase(main):009:0> get 'scores','jie'
COLUMN CELL
course:cloud timestamp=1460479208148, value=92
course:math timestamp=1460479163325,value=86
grade: timestamp=1460479064086,value=146cloud
3 row(s) in 0.0800 seconds
Step 13: Read jie's grade (class)
hbase(main):012:0> get 'scores','jie','grade'
COLUMN CELL
grade: timestamp=1460479064086,value=146cloud
1 row(s) in 0.0150 seconds
Step 14: Scan the entire table
hbase(main):013:0> scan 'scores'
ROW COLUMN+CELL
jie column=course:cloud, timestamp=1460479208148,value=92
jie column=course:math, timestamp=1460479163325, value=86
jie column=grade:,timestamp=1460479064086,value=146cloud
shi column=course:cloud, timestamp=1460479342925,value=96
shi column=course:math, timestamp=1460479312963,value=87
shi column=grade:,timestamp=1460479257429, value=133soft
2 row(s) in 0.0570 seconds
Step 15: Scan the table by column family
hbase(main):014:0> scan 'scores',{COLUMNS=>'course'}
ROW COLUMN+CELL
jie column=course:cloud, timestamp=1460479208148, value=92
jie column=course:math, timestamp=1460479163325, value=86
shi column=course:cloud, timestamp=1460479342925, value=96
shi column=course:math, timestamp=1460479312963, value=87
2 row(s) in 0.0230 seconds
Step 16: Delete a specified record
hbase(main):015:0> delete 'scores','shi','grade'
0 row(s) in 0.0390 seconds
Step 17: After deleting, run scan again
hbase(main):016:0> scan 'scores'
ROW COLUMN+CELL
jie column=course:cloud, timestamp=1460479208148, value=92
jie column=course:math, timestamp=1460479163325, value=86
jie column=grade:, timestamp=1460479064086, value=146cloud
shi column=course:cloud, timestamp=1460479342925, value=96
shi column=course:math, timestamp=1460479312963, value=87
2 row(s) in 0.0350 seconds
Step 18: Add a new column family
hbase(main):017:0> alter 'scores',NAME=>'age'
Updating all regions with the new schema...
0/1 regions updated.
1/1 regions updated.
Done.
0 row(s) in 3.0060 seconds
Step 19: Describe the table structure
hbase(main):018:0> describe 'scores'
Table scores is ENABLED
scores
COLUMN FAMILIES DESCRIPTION
{NAME => 'age', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}
{NAME => 'course', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}
{NAME => 'grade', BLOOMFILTER => 'ROW', VERSIONS => '1', IN_MEMORY => 'false', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', COMPRESSION => 'NONE', MIN_VERSIONS => '0', BLOCKCACHE => 'true', BLOCKSIZE => '65536', REPLICATION_SCOPE => '0'}
3 row(s) in 0.0400 seconds
Step 20: Delete a column family
hbase(main):020:0> alter 'scores',NAME=>'age',METHOD=>'delete'
Updating all regions with the new schema...
1/1 regions updated.
Done.
0 row(s) in 2.1600 seconds
Step 21: Delete the table
hbase(main):021:0> disable 'scores'
0 row(s) in 2.2930 seconds
hbase(main):022:0> drop 'scores'
0 row(s) in 1.2530 seconds
hbase(main):023:0> list
TABLE
0 row(s) in 0.0150 seconds
=> []
Step 22: Quit
hbase(main):024:0> quit
[hadoop@master ~]$
Step 23: Shut down HBase
# Stop HBase on the master node
[hadoop@master ~]$ stop-hbase.sh
# Stop ZooKeeper on all nodes
[hadoop@master ~]$ zkServer.sh stop
[hadoop@slave1 ~]$ zkServer.sh stop
[hadoop@slave2 ~]$ zkServer.sh stop
# Stop Hadoop on the master node
[hadoop@master ~]$ stop-all.sh
Chapter 9: Sqoop Component
1. Upload the Sqoop package, extract it, and rename it
Download: https://mirror-hk.koddos.net/apache/sqoop/
On the Windows client: press Win+R and run cmd to open a terminal for uploading the package to master
[root@master ~]# tar -zxvf sqoop-1.4.7.bin__hadoop-2.6.0.tar.gz -C /usr/local/src/
[root@master ~]# cd /usr/local/src/
[root@master src]# mv ./sqoop-1.4.7.bin__hadoop-2.6.0/ sqoop
2. Configure Sqoop
Create the Sqoop configuration file sqoop-env.sh
[root@master ~]# cd /usr/local/src/sqoop/conf/
[root@master conf]# cp sqoop-env-template.sh sqoop-env.sh
Edit sqoop-env.sh
[root@master conf]# vi sqoop-env.sh
export HADOOP_COMMON_HOME=/usr/local/src/hadoop
export HADOOP_MAPRED_HOME=/usr/local/src/hadoop
export HBASE_HOME=/usr/local/src/hbase
export HIVE_HOME=/usr/local/src/hive
Configure environment variables
[root@master conf]# vi /etc/profile.d/sqoop.sh
export SQOOP_HOME=/usr/local/src/sqoop
export PATH=${SQOOP_HOME}/bin:$PATH
Set up the MySQL connector for database connections
[root@master conf]# source /etc/profile.d/sqoop.sh
[root@master conf]# cp /root/software/mysql-connector-java-5.1.46.jar /usr/local/src/sqoop/lib/
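With the connector jar in place, a quick sanity check is to ask Sqoop for its version; it should report Sqoop 1.4.7 (the HCatalog/Accumulo warnings shown later are harmless):
[root@master conf]# sqoop version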
3. Start Sqoop
Start all processes
[root@master ~]# su - hadoop
Last login: Fri Apr 22 15:05:13 CST 2022 on pts/1
[hadoop@master ~]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
10.10.10.130: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
10.10.10.129: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
10.10.10.130: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
10.10.10.129: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
[hadoop@master ~]$ jps
2305 ResourceManager
2565 Jps
1926 NameNode
2136 SecondaryNameNode
Test the Sqoop connection to MySQL
[hadoop@master ~]$ sqoop list-databases --connect jdbc:mysql://127.0.0.1:3306 --username root -P
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/04/29 15:16:26 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
Enter password:
22/04/29 15:16:34 INFO manager.MySQLManager: Preparing to use a MySQL streaming resultset.
Fri Apr 29 15:16:34 CST 2022 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
information_schema
mysql
performance_schema
sys
Connect Sqoop to Hive (copy the Hive common jar into Sqoop's lib directory)
[hadoop@master ~]$ cp /usr/local/src/hive/lib/hive-common-2.0.0.jar /usr/local/src/sqoop/lib/
4. Sqoop Template Commands
Create the MySQL database and table
[hadoop@master ~]$ mysql -uroot -p'Password123!'
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 7
Server version: 5.7.18 MySQL Community Server (GPL)
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> create database sample;
Query OK, 1 row affected (0.00 sec)
mysql> use sample
Database changed
mysql> create table student(number char(9) primary key,name varchar(10));
Query OK, 0 rows affected (0.01 sec)
mysql> insert student values('01','zhangsan'),('02','lisi'),('03','wangwu');
Query OK, 3 rows affected (0.00 sec)
Records: 3 Duplicates: 0 Warnings: 0
mysql> select * from student;
+--------+----------+
| number | name |
+--------+----------+
| 01 | zhangsan |
| 02 | lisi |
| 03 | wangwu |
+--------+----------+
3 rows in set (0.00 sec)
mysql> quit
Bye
Query the Hive database and table (output shown after the import step below)
[hadoop@master ~]$ hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/hive-jdbc-2.0.0-standalone.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/log4j-slf4j-impl-2.4.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> use sample;
OK
Time taken: 0.477 seconds
hive> show tables;
OK
student
Time taken: 0.152 seconds, Fetched: 1 row(s)
hive> select * from student;
OK
01|zhangsan NULL
02|lisi NULL
03|wangwu NULL
Time taken: 0.773 seconds, Fetched: 3 row(s)
hive> quit;
Create the sample database and student table in Hive
[hadoop@master ~]$ hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/hive-jdbc-2.0.0-standalone.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/log4j-slf4j-impl-2.4.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive> create database sample;
OK
Time taken: 0.694 seconds
hive> use sample;
OK
Time taken: 0.013 seconds
hive> create table student(number STRING,name STRING);
OK
Time taken: 0.23 seconds
hive> exit;
Export data from MySQL and import it into Hive
[hadoop@master ~]$ sqoop import --connect jdbc:mysql://master:3306/sample --username root --password Password123! --table student --fields-terminated-by '|' --delete-target-dir --num-mappers 1 --hive-import --hive-database sample --hive-table student
Export data from Hive and import it into MySQL
[hadoop@master ~]$ mysql -uroot -pPassword123! -e 'delete from sample.student;'
[hadoop@master ~]$ sqoop export --connect "jdbc:mysql://master:3306/sample?useUnicode=true&characterEncoding=utf-8" --username root --password Password123! --table student --input-fields-terminated-by '|' --export-dir /user/hive/warehouse/sample.db/student/*
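After the export finishes, the rows should be back in MySQL; a quick check with the same credentials:
[hadoop@master ~]$ mysql -uroot -p'Password123!' -e 'select * from sample.student;'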
5. Sqoop Component Usage
List all MySQL databases
[hadoop@master ~]$ sqoop list-databases --connect jdbc:mysql://master:3306 --username root --password Password123!
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/04/29 16:44:07 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
22/04/29 16:44:07 WARN tool.BaseSqoopTool: Setting your password on the command-line is insecure. Consider using -P instead.
22/04/29 16:44:07 INFO manager.MySQLManager: Preparing to use a MySQL streaming resultset.
Fri Apr 29 16:44:07 CST 2022 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
information_schema
hive
mysql
performance_schema
sample
sys
Connect to MySQL and list the tables in the sample database
[hadoop@master ~]$ sqoop list-tables --connect jdbc:mysql://master:3306/sample --username root --password Password123!
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/04/29 16:44:44 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
22/04/29 16:44:44 WARN tool.BaseSqoopTool: Setting your password on the command-line is insecure. Consider using -P instead.
22/04/29 16:44:44 INFO manager.MySQLManager: Preparing to use a MySQL streaming resultset.
Fri Apr 29 16:44:44 CST 2022 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
student
Copy a relational table's structure into Hive
[hadoop@master ~]$ sqoop create-hive-table -connect jdbc:mysql://localhost:3306/sample -table student -username root -password Password123! -hive-table test
Import data from the relational database into Hive
[hadoop@master ~]$ sqoop import --connect jdbc:mysql://master:3306/sample --username root --password Password123! --table student --delete-target-dir --num-mappers 1 --hive-import --hive-database default --hive-table test
Export table data from Hive into MySQL
[hadoop@master ~]$ sqoop export -connect jdbc:mysql://master:3306/sample -username root -password Password123! -table student --input-fields-terminated-by '\001' -export-dir /user/hive/warehouse/test
Export table data from the database to files on HDFS
[hadoop@master ~]$ sqoop import -connect jdbc:mysql://master:3306/sample -username root -password Password123! -table student --num-mappers 1 -target-dir /user/test
Incrementally import table data from the database into HDFS
[hadoop@master ~]$ mysql -uroot -pPassword123!
mysql: [Warning] Using a password on the command line interface can be insecure.
Welcome to the MySQL monitor. Commands end with ; or \g.
Your MySQL connection id is 103
Server version: 5.7.18 MySQL Community Server (GPL)
Copyright (c) 2000, 2017, Oracle and/or its affiliates. All rights reserved.
Oracle is a registered trademark of Oracle Corporation and/or its
affiliates. Other names may be trademarks of their respective
owners.
Type 'help;' or '\h' for help. Type '\c' to clear the current input statement.
mysql> use sample;
Reading table information for completion of table and column names
You can turn off this feature to get a quicker startup with -A
Database changed
mysql> insert into student values('04','sss');
Query OK, 1 row affected (0.01 sec)
mysql> insert into student values('05','ss2');
Query OK, 1 row affected (0.00 sec)
mysql> insert into student values('06','ss3');
Query OK, 1 row affected (0.01 sec)
mysql> alter table student modify column number int;
Query OK, 3 rows affected (0.04 sec)
Records: 3 Duplicates: 0 Warnings: 0
mysql> exit;
Bye
[hadoop@master ~]$ sqoop import -connect jdbc:mysql://master:3306/sample -username root -password Password123! -table student --num-mappers 1 -target-dir /user/test -check-column number -incremental append -last-value 0
[hadoop@master ~]$ hdfs dfs -cat /user/test/part-m-00001
Chapter 10: Flume Component
1. Download and upload the package
Download: https://archive.apache.org/dist/flume/1.6.0/
2. Deploy the Flume component
# Extract the archive
[root@master ~]# tar xf apache-flume-1.6.0-bin.tar.gz -C /usr/local/src/
# Enter the directory
[root@master ~]# cd /usr/local/src/
# Rename to flume
[root@master src]# mv apache-flume-1.6.0-bin/ flume
# Change ownership
[root@master src]# chown -R hadoop.hadoop /usr/local/src/
# Create the environment variables
[root@master src]# vi /etc/profile.d/flume.sh
export FLUME_HOME=/usr/local/src/flume
export PATH=${FLUME_HOME}/bin:$PATH
Check that the Flume path is present
[root@master src]# su - hadoop
Last login: Fri Apr 29 16:36:50 CST 2022 on pts/1
[hadoop@master ~]$ echo $PATH
/home/hadoop/.local/bin:/home/hadoop/bin:/usr/local/src/zookeeper/bin:/usr/local/src/sqoop/bin:/usr/local/src/hive/bin:/usr/local/src/hbase/bin:/usr/local/src/jdk/bin:/usr/local/src/hadoop/bin:/usr/local/src/hadoop/sbin:/usr/local/src/flume/bin:/usr/local/bin:/usr/bin:/usr/local/sbin:/usr/sbin
# Seeing the Flume installation path in the output means everything is fine
3. Configure Flume
# Modify the HBase configuration file
[hadoop@master ~]$ vi /usr/local/src/hbase/conf/hbase-env.sh
#export HBASE_CLASSPATH=/usr/local/src/hadoop/etc/hadoop/    # comment out this line
# Enter the directory
[hadoop@master ~]$ cd /usr/local/src/flume/conf/
# Copy and rename to flume-env.sh
[hadoop@master conf]$ cp flume-env.sh.template flume-env.sh
# Edit the configuration file
[hadoop@master conf]$ vi flume-env.sh
export JAVA_HOME=/usr/local/src/jdk
# Start all Hadoop components
[hadoop@master conf]$ start-all.sh
# Check the version
[hadoop@master conf]$ flume-ng version
Flume 1.6.0
Source code repository: https://git-wip-us.apache.org/repos/asf/flume.git
Revision: 2561a23240a71ba20bf288c7c2cda88f443c2080
Compiled by hshreedharan on Mon May 11 11:15:44 PDT 2015
From source with checksum b29e416802ce9ece3269d34233baf43f
4. Use Flume to send and receive data
# Enter the directory
[hadoop@master conf]$ cd /usr/local/src/flume/
# Write the agent configuration
[hadoop@master flume]$ vi /usr/local/src/flume/simple-hdfs-flume.conf
# a1 is the agent name; r1, k1, c1 are its three components (source, sink, channel)
a1.sources=r1
a1.sinks=k1
a1.channels=c1
# Set the type, spool directory, and file header attribute of source r1
a1.sources.r1.type=spooldir
a1.sources.r1.spoolDir=/usr/local/src/hadoop/logs/
a1.sources.r1.fileHeader=true
# Set the properties of sink k1
# sink type: hdfs
a1.sinks.k1.type=hdfs
# target location on HDFS
a1.sinks.k1.hdfs.path=hdfs://master:9000/tmp/flume
# roll to a new target file once the temporary file reaches 1048760 bytes
a1.sinks.k1.hdfs.rollSize=1048760
# 0 means do not roll based on the number of events
a1.sinks.k1.hdfs.rollCount=0
# roll the temporary file into a target file every 900 seconds
a1.sinks.k1.hdfs.rollInterval=900
# use the local timestamp
a1.sinks.k1.hdfs.useLocalTimeStamp=true
# Set the properties of channel c1
# use a file-backed channel
a1.channels.c1.type=file
a1.channels.c1.capacity=1000
a1.channels.c1.transactionCapacity=100
# Use c1 as the transfer channel between the source and the sink
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
# Run the agent
[hadoop@master flume]$ flume-ng agent --conf-file simple-hdfs-flume.conf --name a1
View the files Flume wrote to HDFS
[hadoop@master flume]$ hdfs dfs -ls /tmp/flume
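The agent keeps running in the foreground, so stop it with Ctrl+C once files show up. By default the HDFS sink names files with the FlumeData prefix; an individual file can be inspected directly (the exact generated name will differ, the one below is only a placeholder):
hdfs dfs -cat /tmp/flume/FlumeData.<timestamp>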