Hadoop十三到十五章总和
十三、大数据平台监控
一、监控大数据平台
查看Linux系统信息
[root@master ~]# uname -a
Linux master 4.18.0-373.el8.x86_64 #1 SMP Tue Mar 22 15:11:47 UTC 2022 x86_64 x86_64 x86_64 GNU/Linux
查看硬盘信息
查看分区
[root@master ~]# fdisk -l
Disk /dev/sda: 20 GiB, 21474836480 bytes, 41943040 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disklabel type: dos
Disk identifier: 0x3f653cbf
Device Boot Start End Sectors Size Id Type
/dev/sda1 * 2048 2099199 2097152 1G 83 Linux
/dev/sda2 2099200 41943039 39843840 19G 8e Linux LVM
Disk /dev/mapper/cs-root: 17 GiB, 18249416704 bytes, 35643392 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
Disk /dev/mapper/cs-swap: 2 GiB, 2147483648 bytes, 4194304 sectors
Units: sectors of 1 * 512 = 512 bytes
Sector size (logical/physical): 512 bytes / 512 bytes
I/O size (minimum/optimal): 512 bytes / 512 bytes
查看所有交换分区
[root@master ~]# swapon -s
Filename Type Size Used Priority
/dev/dm-1 partition 2097148 0 -2
查看文件系统占比
[root@master ~]# df -h
Filesystem Size Used Avail Use% Mounted on
devtmpfs 370M 0 370M 0% /dev
tmpfs 389M 0 389M 0% /dev/shm
tmpfs 389M 11M 379M 3% /run
tmpfs 389M 0 389M 0% /sys/fs/cgroup
/dev/mapper/cs-root 17G 5.3G 12G 32% /
/dev/sda1 1014M 210M 805M 21% /boot
tmpfs 78M 0 78M 0% /run/user/0
查看网络IP
[root@master ~]# ifconfig
ens33: flags=4163<UP,BROADCAST,RUNNING,MULTICAST>  mtu 1500
inet 10.10.10.128 netmask 255.255.255.0 broadcast 10.10.10.255
inet6 fe80::20c:29ff:fe4f:1938 prefixlen 64 scopeid 0x20<link>
ether 00:0c:29:4f:19:38 txqueuelen 1000 (Ethernet)
RX packets 326 bytes 29201 (28.5 KiB)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 216 bytes 24513 (23.9 KiB)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
lo: flags=73<UP,LOOPBACK,RUNNING>  mtu 65536
inet 127.0.0.1 netmask 255.0.0.0
inet6 ::1 prefixlen 128 scopeid 0x10<host>
loop txqueuelen 1000 (Local Loopback)
RX packets 8 bytes 720 (720.0 B)
RX errors 0 dropped 0 overruns 0 frame 0
TX packets 8 bytes 720 (720.0 B)
TX errors 0 dropped 0 overruns 0 carrier 0 collisions 0
查看所有监听端口
[root@master ~]# netstat -lntp
Active Internet connections (only servers)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN 958/sshd
tcp6 0 0 :::3306 :::* LISTEN 1247/mysqld
tcp6 0 0 :::22 :::* LISTEN 958/sshd
查看所有已建立的连接
[hadoop@master ~]$ netstat -antp
(No info could be read for "-p": geteuid()=1000 but you should be root.)
Active Internet connections (servers and established)
Proto Recv-Q Send-Q Local Address Foreign Address State PID/Program name
tcp 0 0 0.0.0.0:22 0.0.0.0:* LISTEN -
tcp 0 52 10.10.10.128:22 10.10.10.1:7076 ESTABLISHED -
tcp6 0 0 :::3306 :::* LISTEN -
tcp6 0 0 :::22 :::* LISTEN -
实时显示进程状态
[root@master ~]# top
top - 17:07:10 up 10 min, 1 user, load average: 0.00, 0.03, 0.04
Tasks: 169 total, 1 running, 168 sleeping, 0 stopped, 0 zombie
%Cpu(s): 0.0 us, 0.0 sy, 0.0 ni, 99.9 id, 0.0 wa, 0.0 hi, 0.1 si, 0.0 st
MiB Mem : 777.4 total, 116.6 free, 401.0 used, 259.7 buff/cache
MiB Swap: 2048.0 total, 2048.0 free, 0.0 used. 245.6 avail Mem
PID USER PR NI VIRT RES SHR S %CPU %MEM
313 root 0 -20 0 0 0 I 0.3 0.0
960 root 20 0 497104 29348 15336 S 0.3 3.7
1620 root 20 0 153500 5356 4092 S 0.3 0.7
1 root 20 0 174916 13432 8460 S 0.0 1.7
2 root 20 0 0 0 0 S 0.0 0.0
3 root 0 -20 0 0 0 I 0.0 0.0
4 root 0 -20 0 0 0 I 0.0 0.0
6 root 0 -20 0 0 0 I 0.0 0.0
8 root 20 0 0 0 0 I 0.0 0.0
查看CPU信息
[root@master ~]# cat /proc/cpuinfo
processor : 0
vendor_id : GenuineIntel
cpu family : 6
model : 165
model name : Intel(R) Core(TM) i3-10100 CPU @ 3.60GHz
stepping : 3
microcode : 0xcc
cpu MHz : 3600.005
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 0
cpu cores : 4
apicid : 0
initial apicid : 0
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid rdseed adx smap clflushopt xsaveopt xsavec xsaves arat md_clear flush_l1d arch_capabilities
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs itlb_multihit
bogomips : 7200.01
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
processor : 1
vendor_id : GenuineIntel
cpu family : 6
model : 165
model name : Intel(R) Core(TM) i3-10100 CPU @ 3.60GHz
stepping : 3
microcode : 0xcc
cpu MHz : 3600.005
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 1
cpu cores : 4
apicid : 1
initial apicid : 1
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid rdseed adx smap clflushopt xsaveopt xsavec xsaves arat md_clear flush_l1d arch_capabilities
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs itlb_multihit
bogomips : 7200.01
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
processor : 2
vendor_id : GenuineIntel
cpu family : 6
model : 165
model name : Intel(R) Core(TM) i3-10100 CPU @ 3.60GHz
stepping : 3
microcode : 0xcc
cpu MHz : 3600.005
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 2
cpu cores : 4
apicid : 2
initial apicid : 2
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid rdseed adx smap clflushopt xsaveopt xsavec xsaves arat md_clear flush_l1d arch_capabilities
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs itlb_multihit
bogomips : 7200.01
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
processor : 3
vendor_id : GenuineIntel
cpu family : 6
model : 165
model name : Intel(R) Core(TM) i3-10100 CPU @ 3.60GHz
stepping : 3
microcode : 0xcc
cpu MHz : 3600.005
cache size : 6144 KB
physical id : 0
siblings : 4
core id : 3
cpu cores : 4
apicid : 3
initial apicid : 3
fpu : yes
fpu_exception : yes
cpuid level : 22
wp : yes
flags : fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ss ht syscall nx pdpe1gb rdtscp lm constant_tsc arch_perfmon nopl xtopology tsc_reliable nonstop_tsc cpuid pni pclmulqdq ssse3 fma cx16 pcid sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm abm 3dnowprefetch invpcid_single ssbd ibrs ibpb stibp ibrs_enhanced fsgsbase tsc_adjust bmi1 avx2 smep bmi2 invpcid rdseed adx smap clflushopt xsaveopt xsavec xsaves arat md_clear flush_l1d arch_capabilities
bugs : spectre_v1 spectre_v2 spec_store_bypass swapgs itlb_multihit
bogomips : 7200.01
clflush size : 64
cache_alignment : 64
address sizes : 43 bits physical, 48 bits virtual
power management:
查看内存信息
[root@master ~]# cat /proc/meminfo
MemTotal: 796056 kB
MemFree: 120252 kB
MemAvailable: 252360 kB
Buffers: 4204 kB
Cached: 227492 kB
SwapCached: 0 kB
Active: 83512 kB
Inactive: 407888 kB
Active(anon): 1764 kB
Inactive(anon): 268576 kB
Active(file): 81748 kB
Inactive(file): 139312 kB
Unevictable: 0 kB
Mlocked: 0 kB
SwapTotal: 2097148 kB
SwapFree: 2097148 kB
Dirty: 0 kB
Writeback: 0 kB
AnonPages: 259716 kB
Mapped: 95376 kB
Shmem: 10636 kB
KReclaimable: 34272 kB
Slab: 83828 kB
SReclaimable: 34272 kB
SUnreclaim: 49556 kB
KernelStack: 5456 kB
PageTables: 7168 kB
NFS_Unstable: 0 kB
Bounce: 0 kB
WritebackTmp: 0 kB
CommitLimit: 2495176 kB
Committed_AS: 864108 kB
VmallocTotal: 34359738367 kB
VmallocUsed: 0 kB
VmallocChunk: 0 kB
Percpu: 76288 kB
HardwareCorrupted: 0 kB
AnonHugePages: 190464 kB
ShmemHugePages: 0 kB
ShmemPmdMapped: 0 kB
FileHugePages: 0 kB
FilePmdMapped: 0 kB
HugePages_Total: 0
HugePages_Free: 0
HugePages_Rsvd: 0
HugePages_Surp: 0
Hugepagesize: 2048 kB
Hugetlb: 0 kB
DirectMap4k: 124800 kB
DirectMap2M: 923648 kB
DirectMap1G: 0 kB
查看Hadoop状态
#切换Hadoop用户
[root@master ~]# su - hadoop
Last login: Fri May 13 17:05:40 CST 2022 on pts/0
#切换Hadoop安装目录
[hadoop@master ~]$ cd /usr/local/src/hadoop/
#启动Hadoop
[hadoop@master hadoop]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
10.10.10.129: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
10.10.10.130: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: secondarynamenode running as process 1989. Stop it first.
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
10.10.10.130: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
10.10.10.129: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
#关闭Hadoop
[hadoop@master hadoop]$ stop-all.sh
This script is Deprecated. Instead use stop-dfs.sh and stop-yarn.sh
Stopping namenodes on [master]
master: stopping namenode
10.10.10.129: stopping datanode
10.10.10.130: stopping datanode
Stopping secondary namenodes [0.0.0.0]
0.0.0.0: no secondarynamenode to stop
stopping yarn daemons
stopping resourcemanager
10.10.10.129: stopping nodemanager
10.10.10.130: stopping nodemanager
no proxyserver to stop
二、监控大数据平台资源状态
查看YARN状态
#切换Hadoop安装目录
[hadoop@master ~]$ cd /usr/local/src/hadoop/
#启动Zookeeper
[hadoop@master hadoop]$ zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@slave1 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
[root@slave2 ~]# zkServer.sh start
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Starting zookeeper ... STARTED
#启动Hadoop
[hadoop@master hadoop]$ start-all.sh
This script is Deprecated. Instead use start-dfs.sh and start-yarn.sh
Starting namenodes on [master]
master: starting namenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-namenode-master.out
10.10.10.130: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave2.out
10.10.10.129: starting datanode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-datanode-slave1.out
Starting secondary namenodes [0.0.0.0]
0.0.0.0: starting secondarynamenode, logging to /usr/local/src/hadoop/logs/hadoop-hadoop-secondarynamenode-master.out
starting yarn daemons
starting resourcemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-resourcemanager-master.out
10.10.10.129: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave1.out
10.10.10.130: starting nodemanager, logging to /usr/local/src/hadoop/logs/yarn-hadoop-nodemanager-slave2.out
#JPS查看有NodeManager和ResourceManager进程则表示YARN成功
[hadoop@master hadoop]$ jps
3539 ResourceManager
2984 QuorumPeerMain
3161 NameNode
3371 SecondaryNameNode
3803 Jps
查看HDFS状态
#切换Hadoop安装目录
[hadoop@master ~]$ cd /usr/local/src/hadoop/
#查看HDFS目录
[hadoop@master hadoop]$ ./bin/hdfs dfs -ls /
Found 5 items
drwxr-xr-x - hadoop supergroup 0 2022-04-15 17:30 /hbase
drwxr-xr-x - hadoop supergroup 0 2022-04-15 14:53 /input
drwxr-xr-x - hadoop supergroup 0 2022-04-15 14:53 /output
drwx------ - hadoop supergroup 0 2022-05-06 17:31 /tmp
drwxr-xr-x - hadoop supergroup 0 2022-04-29 17:06 /user
#查看HDFS报告
[hadoop@master hadoop]$ bin/hdfs dfsadmin -report
Configured Capacity: 36477861888 (33.97 GB)
Present Capacity: 30300819456 (28.22 GB)
DFS Remaining: 30297894912 (28.22 GB)
DFS Used: 2924544 (2.79 MB)
DFS Used%: 0.01%
Under replicated blocks: 0
Blocks with corrupt replicas: 0
Missing blocks: 0
Missing blocks (with replication factor 1): 0
-------------------------------------------------
Live datanodes (2):
Name: 10.10.10.129:50010 (slave1)
Hostname: slave1
Decommission Status : Normal
Configured Capacity: 18238930944 (16.99 GB)
DFS Used: 1462272 (1.39 MB)
Non DFS Used: 3130793984 (2.92 GB)
DFS Remaining: 15106674688 (14.07 GB)
DFS Used%: 0.01%
DFS Remaining%: 82.83%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Fri May 13 17:23:05 CST 2022
Name: 10.10.10.130:50010 (slave2)
Hostname: slave2
Decommission Status : Normal
Configured Capacity: 18238930944 (16.99 GB)
DFS Used: 1462272 (1.39 MB)
Non DFS Used: 3046248448 (2.84 GB)
DFS Remaining: 15191220224 (14.15 GB)
DFS Used%: 0.01%
DFS Remaining%: 83.29%
Configured Cache Capacity: 0 (0 B)
Cache Used: 0 (0 B)
Cache Remaining: 0 (0 B)
Cache Used%: 100.00%
Cache Remaining%: 0.00%
Xceivers: 1
Last contact: Fri May 13 17:23:05 CST 2022
#查看HDFS空间情况
[hadoop@master hadoop]$ hdfs dfs -df /
Filesystem Size Used Available Use%
hdfs://master:9000 36477861888 2924544 30297894912 0%
查看HBase状态
启动HBase
#进入HBase安装目录
[hadoop@master ~]$ cd /usr/local/src/hbase/
#查看版本
[hadoop@master hbase]$ hbase version
HBase 1.2.1
Source code repository git://asf-dev/home/busbey/projects/hbase revision=8d8a7107dc4ccbf36a92f64675dc60392f85c015
Compiled by busbey on Wed Mar 30 11:19:21 CDT 2016
From source with checksum f4bb4a14bb4e0b72b46f729dae98a772
#结果显示 HBase1.2.1,说明 HBase 正在运行,版本号为 1.2.1。
#如果没有启动,则执行命令 start-hbase.sh 启动 HBase。
[hadoop@master hbase]$ start-hbase.sh
slave1: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-slave1.out
slave2: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-slave2.out
master: starting zookeeper, logging to /usr/local/src/hbase/logs/hbase-hadoop-zookeeper-master.out
starting master, logging to /usr/local/src/hbase/logs/hbase-hadoop-master-master.out
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
10.10.10.129: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave1.out
10.10.10.130: starting regionserver, logging to /usr/local/src/hbase/logs/hbase-hadoop-regionserver-slave2.out
10.10.10.130: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
10.10.10.130: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
10.10.10.129: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option PermSize=128m; support was removed in 8.0
10.10.10.129: Java HotSpot(TM) 64-Bit Server VM warning: ignoring option MaxPermSize=128m; support was removed in 8.0
[hadoop@master hbase]$ hbase version
HBase 1.2.1
Source code repository git://asf-dev/home/busbey/projects/hbase revision=8d8a7107dc4ccbf36a92f64675dc60392f85c015
Compiled by busbey on Wed Mar 30 11:19:21 CDT 2016
From source with checksum f4bb4a14bb4e0b72b46f729dae98a772
查看HBase版本信息
#进入HBase交互界面
[hadoop@master hbase]$ hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hbase/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell; enter 'help' for list of supported commands.
Type "exit" to leave the HBase Shell
Version 1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
hbase(main):001:0>
#查看版本
hbase(main):001:0> version
1.2.1, r8d8a7107dc4ccbf36a92f64675dc60392f85c015, Wed Mar 30 11:19:21 CDT 2016
查询HBase状态
hbase(main):002:0> status
1 active master, 0 backup masters, 3 servers, 0 dead, 0.6667 average load
查看Hive状态
启动Hive
[hadoop@master ~]$ cd /usr/local/src/hive/
[hadoop@master hive]$ hive
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/hive-jdbc-2.0.0-standalone.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hive/lib/log4j-slf4j-impl-2.4.1.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/src/hadoop/share/hadoop/common/lib/slf4j-log4j12-1.7.10.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.apache.logging.slf4j.Log4jLoggerFactory]
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
hive>
Hive操作基本命令
查看数据库
hive> show databases;
OK
default
sample
Time taken: 0.628 seconds, Fetched: 2 row(s)
查看default数据库所有表
hive> use default;
OK
Time taken: 0.025 seconds
hive> show tables;
OK
test
Time taken: 0.05 seconds, Fetched: 1 row(s)
创建表stu,表的id为整数型,name为字符型
hive> create table stu(id int,name string);
OK
Time taken: 0.382 seconds
为表stu插入一条信息,id号为1001,name为zhangsan(张三)
hive> insert into stu values(1001,"zhangsan");
WARNING: Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
Query ID = hadoop_20220517143507_8a20256e-ac94-49f9-8c4c-93a86d341936
Total jobs = 3
Launching Job 1 out of 3
Number of reduce tasks is set to 0 since there's no reduce operator
Starting Job = job_1652768861914_0001, Tracking URL = http://master:8088/proxy/application_1652768861914_0001/
Kill Command = /usr/local/src/hadoop/bin/hadoop job -kill job_1652768861914_0001
Hadoop job information for Stage-1: number of mappers: 1; number of reducers: 0
2022-05-17 14:35:44,996 Stage-1 map = 0%, reduce = 0%
2022-05-17 14:35:50,379 Stage-1 map = 100%, reduce = 0%, Cumulative CPU 2.58 sec
MapReduce Total cumulative CPU time: 2 seconds 580 msec
Ended Job = job_1652768861914_0001
Stage-4 is selected by condition resolver.
Stage-3 is filtered out by condition resolver.
Stage-5 is filtered out by condition resolver.
Moving data to: hdfs://master:9000/user/hive/warehouse/stu/.hive-staging_hive_2022-05-17_14-35-35_416_5475258551476738478-1/-ext-10000
Loading data to table default.stu
MapReduce Jobs Launched:
Stage-Stage-1: Map: 1 Cumulative CPU: 2.58 sec HDFS Read: 4138 HDFS Write: 81 SUCCESS
Total MapReduce CPU Time Spent: 2 seconds 580 msec
OK
Time taken: 17.501 seconds
插入数据后查看表的信息
hive> show tables;
OK
stu
test
values__tmp__table__1
Time taken: 0.035 seconds, Fetched: 3 row(s)
查看表stu结构
hive> desc stu;
OK
id int
name string
Time taken: 0.044 seconds, Fetched: 2 row(s)
查看表stu的内容
hive> select * from stu;
OK
1001 zhangsan
Time taken: 0.119 seconds, Fetched: 1 row(s)
查看文件系统和历史命令
查看本地文件系统
hive> ! ls /usr/local/src;
flume
hadoop
hbase
hive
jdk
sqoop
zookeeper
查看HDFS文件系统
hive> dfs -ls /;
Found 5 items
drwxr-xr-x - hadoop supergroup 0 2022-05-13 17:29 /hbase
drwxr-xr-x - hadoop supergroup 0 2022-04-15 14:53 /input
drwxr-xr-x - hadoop supergroup 0 2022-04-15 14:53 /output
drwx------ - hadoop supergroup 0 2022-05-06 17:31 /tmp
drwxr-xr-x - hadoop supergroup 0 2022-04-29 17:06 /user
查看Hive输入的所有历史命令
[hadoop@master hive]$ cd /home/hadoop/
[hadoop@master ~]$ cat .hivehistory
quit
exit
create database sample;
use sample;
create table student(number STRING,name STRING);
exit;
use sample;
show tables;
select * from student;
quit;
clear
exit;
show databases;
use default;
show tables;
create table stu(id int,name string);
insert into stu values (1001,"zhangsan")
use default;
show tables;
insert into stu values(1001,"zhangsan");
show tables;
desc stu;
select * from stu;
! ls /usr/local/src;
dfs -ls /;
三、监控大数据平台服务状态
查看ZooKeeper状态
查看ZooKeeper状态
[hadoop@master ~]$ zkServer.sh status
ZooKeeper JMX enabled by default
Using config: /usr/local/src/zookeeper/bin/../conf/zoo.cfg
Mode: follower
#Mode: follower表示ZooKeeper的跟随者
查看运行进程
[hadoop@master ~]$ jps
1968 NameNode
2179 SecondaryNameNode
3654 QuorumPeerMain
3752 Jps
2350 ResourceManager
#QuorumPeerMain已启动
连接ZooKeeper服务
[hadoop@master ~]$ zkCli.sh
Connecting to localhost:2181
2022-05-17 14:44:43,564 [myid:] - INFO [main:Environment@100] - Client environment:zookeeper.version=3.4.8--1, built on 02/06/2016 03:18 GMT
2022-05-17 14:44:43,566 [myid:] - INFO [main:Environment@100] - Client environment:host.name=master
2022-05-17 14:44:43,566 [myid:] - INFO [main:Environment@100] - Client environment:java.version=1.8.0_152
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.vendor=Oracle Corporation
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.home=/usr/local/src/jdk/jre
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.class.path=/usr/local/src/zookeeper/bin/../build/classes:/usr/local/src/zookeeper/bin/../build/lib/*.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-log4j12-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/slf4j-api-1.6.1.jar:/usr/local/src/zookeeper/bin/../lib/netty-3.7.0.Final.jar:/usr/local/src/zookeeper/bin/../lib/log4j-1.2.16.jar:/usr/local/src/zookeeper/bin/../lib/jline-0.9.94.jar:/usr/local/src/zookeeper/bin/../zookeeper-3.4.8.jar:/usr/local/src/zookeeper/bin/../src/java/lib/*.jar:/usr/local/src/zookeeper/bin/../conf:
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.library.path=/usr/java/packages/lib/amd64:/usr/lib64:/lib64:/lib:/usr/lib
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.io.tmpdir=/tmp
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:java.compiler=
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:os.name=Linux
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:os.arch=amd64
2022-05-17 14:44:43,568 [myid:] - INFO [main:Environment@100] - Client environment:os.version=4.18.0-373.el8.x86_64
2022-05-17 14:44:43,569 [myid:] - INFO [main:Environment@100] - Client environment:user.name=hadoop
2022-05-17 14:44:43,569 [myid:] - INFO [main:Environment@100] - Client environment:user.home=/home/hadoop
2022-05-17 14:44:43,569 [myid:] - INFO [main:Environment@100] - Client environment:user.dir=/home/hadoop
2022-05-17 14:44:43,570 [myid:] - INFO [main:ZooKeeper@438] - Initiating client connection, connectString=localhost:2181 sessionTimeout=30000 watcher=org.apache.zookeeper.ZooKeeperMain$MyWatcher@69d0a921
Welcome to ZooKeeper!
2022-05-17 14:44:43,592 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1032] - Opening socket connection to server localhost/127.0.0.1:2181. Will not attempt to authenticate using SASL (unknown error)
JLine support is enabled
2022-05-17 14:44:43,634 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@876] - Socket connection established to localhost/127.0.0.1:2181, initiating session
2022-05-17 14:44:43,646 [myid:] - INFO [main-SendThread(localhost:2181):ClientCnxn$SendThread@1299] - Session establishment complete on server localhost/127.0.0.1:2181, sessionid = 0x180d0c0ef7f0000, negotiated timeout = 30000
WATCHER::
WatchedEvent state:SyncConnected type:None path:null
[zk: localhost:2181(CONNECTED) 0]
使用Watch监听/hbase目录
[zk: localhost:2181(CONNECTED) 0] get /hbase 1
cZxid = 0x400000002
ctime = Fri May 13 17:30:40 CST 2022
mZxid = 0x400000002
mtime = Fri May 13 17:30:40 CST 2022
pZxid = 0x500000004
cversion = 19
dataVersion = 0
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 0
numChildren = 13
[zk: localhost:2181(CONNECTED) 1] set /hbase value-ipdate
WATCHER::
WatchedEvent state:SyncConnected type:NodeDataChanged path:/hbase
cZxid = 0x400000002
ctime = Fri May 13 17:30:40 CST 2022
mZxid = 0x500000009
mtime = Tue May 17 14:47:48 CST 2022
pZxid = 0x500000004
cversion = 19
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 12
numChildren = 13
[zk: localhost:2181(CONNECTED) 2] get /hbase
value-ipdate
cZxid = 0x400000002
ctime = Fri May 13 17:30:40 CST 2022
mZxid = 0x500000009
mtime = Tue May 17 14:47:48 CST 2022
pZxid = 0x500000004
cversion = 19
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 12
numChildren = 13
[zk: localhost:2181(CONNECTED) 3] quit
Quitting...
2022-05-17 14:46:43,808 [myid:] - INFO [main:ZooKeeper@684] - Session: 0x180d0c0ef7f0001 closed
2022-05-17 14:46:43,809 [myid:] - INFO [main-EventThread:ClientCnxn$EventThread@519] - EventThread shut down for session: 0x180d0c0ef7f0001
查看Sqoop状态
查询Sqoop版本号
[hadoop@master ~]$ cd /usr/local/src/sqoop/
[hadoop@master sqoop]$ ./bin/sqoop-version
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/05/17 14:47:38 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
Sqoop 1.4.7
git commit id 2328971411f57f0cb683dfb79d19d4d19d185dd8
Compiled by maugli on Thu Dec 21 15:59:58 STD 2017
测试Sqoop连接数据库
[hadoop@master ~]$ cd /usr/local/src/sqoop/
[hadoop@master sqoop]$ bin/sqoop list-databases --connect jdbc:mysql://master:3306/ --username root --password Password123!
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/05/17 14:50:51 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
22/05/17 14:50:51 WARN tool.BaseSqoopTool: Setting your password on the command-line is insecure. Consider using -P instead.
22/05/17 14:50:51 INFO manager.MySQLManager: Preparing to use a MySQL streaming resultset.
Tue May 17 14:50:51 CST 2022 WARN: Establishing SSL connection without server's identity verification is not recommended. According to MySQL 5.5.45+, 5.6.26+ and 5.7.6+ requirements SSL connection must be established by default if explicit option isn't set. For compliance with existing applications not using SSL the verifyServerCertificate property is set to 'false'. You need either to explicitly disable SSL by setting useSSL=false, or set useSSL=true and provide truststore for server certificate verification.
information_schema
hive
mysql
performance_schema
sample
sys
查看Sqoop 帮助,代表Sqoop启动成功
[hadoop@master sqoop]$ sqoop help
Warning: /usr/local/src/sqoop/../hcatalog does not exist! HCatalog jobs will fail.
Please set $HCAT_HOME to the root of your HCatalog installation.
Warning: /usr/local/src/sqoop/../accumulo does not exist! Accumulo imports will fail.
Please set $ACCUMULO_HOME to the root of your Accumulo installation.
22/05/17 14:51:41 INFO sqoop.Sqoop: Running Sqoop version: 1.4.7
usage: sqoop COMMAND [ARGS]
Available commands:
codegen Generate code to interact with database records
create-hive-table Import a table definition into Hive
eval Evaluate a SQL statement and display the results
export Export an HDFS directory to a database table
help List available commands
import Import a table from a database to HDFS
import-all-tables Import tables from a database to HDFS
import-mainframe Import datasets from a mainframe server to HDFS
job Work with saved jobs
list-databases List available databases on a server
list-tables List available tables in a database
merge Merge results of incremental imports
metastore Run a standalone Sqoop metastore
version Display version information
See 'sqoop help COMMAND' for information on a specific command.
序号 | 命令 | 功能 |
---|---|---|
1 | import | 将数据导入到集群 |
2 | export | 将集群数据导出 |
3 | codegen | 生成与数据库记录交互的代码 |
4 | create-hive-table | 创建Hive表 |
5 | eval | 查看SQL执行结果 |
6 | import-all-tables | 导入某个数据库下所有表到HDFS中 |
7 | job | 生成一个job |
8 | list-databases | 列出所有数据库名 |
9 | list-tables | 列出某个数据库下所有的表 |
10 | merge | 将HDFS中不同目录下数据合在一起,并存放在指定的目录中 |
11 | metastore | 记录Sqoop job的元数据信息,如果不启动metasrore实例,则默认的元数据存储目录为:~/.sqoop |
12 | help | 打印Sqoop帮助信息 |
13 | version | 打印Sqoop版本信息 |
查看Flume状态
检查Flume安装是否成功
[hadoop@master sqoop]$ cd /usr/local/src/flume/
[hadoop@master flume]$ flume-ng version
Flume 1.6.0
Source code repository: https://git-wip-us.apache.org/repos/asf/flume.git
Revision: 2561a23240a71ba20bf288c7c2cda88f443c2080
Compiled by hshreedharan on Mon May 11 11:15:44 PDT 2015
From source with checksum b29e416802ce9ece3269d34233baf43f
添加example.conf到/usr/local/src/flume
[hadoop@master flume]$ vi /usr/local/src/flume/example.conf
#a1是agent名,r1,k1,c1是a1的三个组件
a1.sources=r1
a1.sinks=k1
a1.channels=c1
#设置r1源文件的类型、路径和文件头属性
a1.sources.r1.type=spooldir
a1.sources.r1.spoolDir=/usr/local/src/hadoop/logs/
a1.sources.r1.fileHeader=true
#设置k1目标存储器属性
#注意:Flume配置文件是Java Properties格式,注释必须独占一行;
#写在属性值后面的"#注释"会被当作属性值的一部分,导致Agent启动失败
#目标存储器类型hdfs
a1.sinks.k1.type=hdfs
#目标存储位置
a1.sinks.k1.hdfs.path=hdfs://master:9000/tmp/flume
#临时文件达1048760 bytes时,滚动形成目标文件(属性名区分大小写,应为rollSize)
a1.sinks.k1.hdfs.rollSize=1048760
#表示不根据events数量来滚动形成目标文件
a1.sinks.k1.hdfs.rollCount=0
#间隔900秒将临时文件滚动形成目标文件
a1.sinks.k1.hdfs.rollInterval=900
#使用本地时间戳
a1.sinks.k1.hdfs.useLocalTimeStamp=true
#设置c1暂存容器属性
#使用文件作为暂存容器
a1.channels.c1.type=file
a1.channels.c1.capacity=1000
a1.channels.c1.transactionCapacity=100
#使用c1作为源和目标数据的传输通道
a1.sources.r1.channels = c1
a1.sinks.k1.channel = c1
启动Flume Agent a1日志控制台
[hadoop@master flume]$ /usr/local/src/flume/bin/flume-ng agent --conf ./conf/ --conf-file ./example.conf --name a1 -Dflume.root.logger=INFO,console
查看结果
[hadoop@master flume]$ hdfs dfs -lsr /tmp/flume
-rw-r--r--   2 hadoop supergroup 1300 2022-05-17 :43 /tmp/flume/FlumeData.1651819407082
-rw-r--r--   2 hadoop supergroup 2748 2022-05-17 :43 /tmp/flume/FlumeData.1651819407083
-rw-r--r--   2 hadoop supergroup 2163 2022-05-17 :43 /tmp/flume/FlumeData.1651819407084
十四、大数据平台监控界面和报表
监控大数据平台运行状态
查看大数据平台状态
http://master:8088/cluster/nodes 或 http://10.10.10.128:8088/cluster/nodes
查看Hadoop状态
http://master:50070 或 http://10.10.10.128:50070
(1)Overview(总览),查看 Hadoop 启动时间、版本号、命名节点日志状态、命名节
点存储状态等信息;
(2)Datanodes(数据节点),查看正在运行、停止运行的数据节点信息;
(3)DataNode Volume Failures(数据节点挂载失败),查看挂载失败的数据节点;
(4)Snapshot(快照),查看快照建立、删除的信息;
(5)Startup Progress(启动进程),查看启动进程信息;
(6)Browse The File System(文件系统浏览),查看 HDFS 中的文件和文件夹;
(7)Logs(日志),查看 Hadoop 的命名节点、资源管理等日志。
1、OverView(概况)
序号 | 参数项 | 信息内容 |
---|---|---|
1 | Started(启动时间) | 2022.05.25 11:37:43 |
2 | Version(版本号) | Hadoop 的版本号为 2.7.1 |
3 | Compiled(编译) | 2015-06-29T06:04Z by jenkins from (detached from 15ecc87) |
4 | Cluster ID(集群 ID) | CID-656eb3c3-6a82-4a10-8ef8-871f39f749eb |
5 | Block Pool ID(数据块池 ID) | BP-2070913520-192.168.1.6-1587617240295 |
2.Summary(概要)
在 Summary 中的结果显示,Security(安全状态)和 SafeMode(安全模式)处于关闭状 态;HDFS 的 Configured Capacity(存储空间配置)容量为 73.95GB,空闲空间(DFS Remaining) 为 68.96 GB;还显示了名称节点日志状态(Journal Manager State)和名称节点存储 (NameNode Storage)的具体路径等。
监控大数据平台资源状态
监控YARN的状态
查看MapReduce运行日志
http://master:8088/logs 或 http://10.10.10.128:8088/logs
[hadoop@master ~]$ cd /usr/local/src/hadoop/sbin/
[hadoop@master sbin]$ ./mr-jobhistory-daemon.sh start historyserver
starting historyserver, logging to /usr/local/src/hadoop/logs/mapred-hadoop-historyserver-master.example.com.out
监控HDFS状态
http://master:50070 或 http://10.10.10.128:50070 访问 HDFS文件系统,点击 Utilities-->Browse The File System
监控HBase状态
http://master:60010 或 http://10.10.10.128:60010
十五、告警和日志信息监控
查看大数据平台日志信息
查看大数据平台主机日志
[hadoop@master ~]$ cd /var/log
[hadoop@master log]$ ll
total 1180
drwxr-xr-x. 2 root root 4096 Mar 26 11:07 anaconda
drwx------. 2 root root 23 Mar 26 11:09 audit
-rw-------. 1 root root 33980 May 27 2022 boot.log
-rw-rw----. 1 root utmp 384 May 25 11:10 btmp
drwxr-x---. 2 chrony chrony 6 Jun 24 2021 chrony
-rw-------. 1 root root 3811 May 27 2022 cron
-rw-r--r--. 1 root root 13144 May 25 10:40 dnf.librepo.log
-rw-r--r--. 1 root root 34020 May 25 10:40 dnf.log
-rw-r--r--. 1 root root 4958 May 25 10:40 dnf.rpm.log
-rw-r-----. 1 root root 372 May 25 10:03 firewalld
-rw-r--r--. 1 root root 600 May 25 10:40 hawkey.log
-rw-r--r--. 1 root root 3595 May 27 15:42 kdump.log
-rw-rw-r--. 1 root utmp 292292 May 27 15:43 lastlog
-rw-------. 1 root root 0 Mar 26 11:04 maillog
-rw-------. 1 root root 832941 May 27 15:43 messages
-rw-r--r--. 1 mysql mysql 29158 May 27 15:42 mysqld.log
drwx------. 2 root root 6 Mar 26 11:04 private
-rw-------. 1 root root 30513 May 27 15:43 secure
-rw-------. 1 root root 0 Mar 26 11:04 spooler
drwxr-x---. 2 sssd sssd 93 May 25 10:11 sssd
drwxr-xr-x. 2 root root 23 Mar 26 11:09 tuned
-rw-r--r--. 1 root root 244 May 27 14:06 vmware-network.1.log
-rw-r--r--. 1 root root 189 May 27 14:00 vmware-network.2.log
-rw-r--r--. 1 root root 244 May 26 14:02 vmware-network.3.log
-rw-r--r--. 1 root root 189 May 26 13:43 vmware-network.4.log
-rw-r--r--. 1 root root 189 May 25 10:03 vmware-network.5.log
-rw-r--r--. 1 root root 189 Mar 26 11:09 vmware-network.6.log
-rw-r--r--. 1 root root 189 May 27 2022 vmware-network.log
-rw-------. 1 root root 8641 May 27 15:42 vmware-vgauthsvc.log.0
-rw-------. 1 root root 14198 May 27 2022 vmware-vmsvc-root.log
-rw-------. 1 root root 930 May 27 15:42 vmware-vmtoolsd-root.log
-rw-rw-r--. 1 root utmp 16128 May 27 15:42 wtmp
查看内核及公共消息日志
[hadoop@master log]$ su root
Password:
[root@master log]# cat messages
May 27 15:41:45 master kernel: pcieport 0000:00:18.5: PME: Signaling with IRQ 53
May 27 15:41:45 master kernel: pcieport 0000:00:18.5: pciehp: Slot #261 AttnBtn+ PwrCtrl+ MRL- AttnInd- PwrInd- HotPlug+ Surprise- Interlock- NoCompl+ IbPresDis- LLActRep+
May 27 15:41:45 master kernel: pcieport 0000:00:18.6: PME: Signaling with IRQ 54
May 27 15:41:45 master kernel: pcieport 0000:00:18.6: pciehp: Slot #262 AttnBtn+ PwrCtrl+ MRL- AttnInd- PwrInd- HotPlug+ Surprise- Interlock- NoCompl+ IbPresDis- LLActRep+
May 27 15:41:45 master kernel: pcieport 0000:00:18.7: PME: Signaling with IRQ 55
May 27 15:41:45 master kernel: pcieport 0000:00:18.7: pciehp: Slot #263 AttnBtn+ PwrCtrl+ MRL- AttnInd- PwrInd- HotPlug+ Surprise- Interlock- NoCompl+ IbPresDis- LLActRep+
May 27 15:41:45 master kernel: shpchp: Standard Hot Plug PCI Controller Driver version: 0.4
May 27 15:41:45 master kernel: ACPI: AC Adapter [ACAD] (on-line)
May 27 15:41:45 master kernel: input: Power Button as /devices/LNXSYSTM:00/LNXPWRBN:00/input/input0
May 27 15:41:45 master kernel: ACPI: Power Button [PWRF]
May 27 15:41:45 master kernel: Serial: 8250/16550 driver, 4 ports, IRQ sharing enabled
May 27 15:41:45 master kernel: 00:05: ttyS0 at I/O 0x3f8 (irq = 4, base_baud = 115200) is a 16550A
May 27 15:41:45 master kernel: Non-volatile memory driver v1.3
May 27 15:41:45 master kernel: rdac: device handler registered
May 27 15:41:45 master kernel: hp_sw: device handler registered
May 27 15:41:45 master kernel: emc: device handler registered
May 27 15:41:45 master kernel: alua: device handler registered
May 27 15:41:45 master kernel: libphy: Fixed MDIO Bus: probed
May 27 15:41:45 master kernel: ehci_hcd: USB 2.0 'Enhanced' Host Controller (EHCI) Driver
May 27 15:41:45 master kernel: ehci-pci: EHCI PCI platform driver
May 27 15:41:45 master kernel: ehci-pci 0000:02:03.0: EHCI Host Controller
May 27 15:41:45 master kernel: ehci-pci 0000:02:03.0: new USB bus registered, assigned bus number 1
May 27 15:41:45 master kernel: ehci-pci 0000:02:03.0: irq 17, io mem 0xfd5ef000
May 27 15:41:45 master kernel: ehci-pci 0000:02:03.0: USB 2.0 started, EHCI 1.00
May 27 15:41:45 master kernel: usb usb1: New USB device found, idVendor=1d6b, idProduct=0002, bcdDevice= 4.18
May 27 15:41:45 master kernel: usb usb1: New USB device strings: Mfr=3, Product=2, SerialNumber=1
May 27 15:41:45 master kernel: usb usb1: Product: EHCI Host Controller
May 27 15:41:45 master kernel: usb usb1: Manufacturer: Linux 4.18.0-373.el8.x86_64 ehci_hcd
May 27 15:41:45 master kernel: usb usb1: SerialNumber: 0000:02:03.0
May 27 15:41:45 master kernel: hub 1-0:1.0: USB hub found
May 27 15:41:45 master kernel: hub 1-0:1.0: 6 ports detected
May 27 15:41:45 master kernel: ohci_hcd: USB 1.1 'Open' Host Controller (OHCI) Driver
May 27 15:41:45 master kernel: ohci-pci: OHCI PCI platform driver
May 27 15:41:45 master kernel: uhci_hcd: USB Universal Host Controller Interface driver
查看计划任务日志
[root@master log]# cat cron
Mar 26 11:09:16 localhost crond[1052]: (CRON) STARTUP (1.5.2)
Mar 26 11:09:16 localhost crond[1052]: (CRON) INFO (Syslog will be used instead of sendmail.)
Mar 26 11:09:16 localhost crond[1052]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 45% if used.)
Mar 26 11:09:16 localhost crond[1052]: (CRON) INFO (running with inotify support)
May 25 10:03:47 localhost crond[1010]: (CRON) STARTUP (1.5.2)
May 25 10:03:47 localhost crond[1010]: (CRON) INFO (Syslog will be used instead of sendmail.)
May 25 10:03:47 localhost crond[1010]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 70% if used.)
May 25 10:03:47 localhost crond[1010]: (CRON) INFO (running with inotify support)
May 25 11:01:01 localhost CROND[18549]: (root) CMD (run-parts /etc/cron.hourly)
May 25 11:01:01 localhost run-parts[18549]: (/etc/cron.hourly) starting 0anacron
May 25 11:01:01 localhost anacron[18558]: Anacron started on 2022-05-25
May 25 11:01:01 localhost anacron[18558]: Will run job `cron.daily' in 21 min.
May 25 11:01:01 localhost anacron[18558]: Will run job `cron.weekly' in 41 min.
May 25 11:01:01 localhost anacron[18558]: Will run job `cron.monthly' in 61 min.
May 25 11:01:01 localhost anacron[18558]: Jobs will be executed sequentially
May 25 11:01:01 localhost run-parts[18549]: (/etc/cron.hourly) finished 0anacron
May 25 11:22:01 localhost anacron[18558]: Job `cron.daily' started
May 25 11:22:01 localhost run-parts[24041]: (/etc/cron.daily) starting logrotate
May 25 11:22:02 localhost run-parts[24041]: (/etc/cron.daily) finished logrotate
May 25 11:22:02 localhost anacron[18558]: Job `cron.daily' terminated
May 26 13:43:10 master crond[1114]: (CRON) STARTUP (1.5.2)
May 26 13:43:10 master crond[1114]: (CRON) INFO (Syslog will be used instead of sendmail.)
May 26 13:43:10 master crond[1114]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 6% if used.)
May 26 13:43:10 master crond[1114]: (CRON) INFO (running with inotify support)
May 26 14:01:01 master CROND[3323]: (root) CMD (run-parts /etc/cron.hourly)
May 26 14:01:01 master run-parts[3323]: (/etc/cron.hourly) starting 0anacron
May 26 14:01:01 master anacron[3332]: Anacron started on 2022-05-26
May 26 14:01:01 master run-parts[3323]: (/etc/cron.hourly) finished 0anacron
May 26 14:01:01 master anacron[3332]: Will run job `cron.daily' in 38 min.
May 26 14:01:01 master anacron[3332]: Will run job `cron.weekly' in 58 min.
May 26 14:01:01 master anacron[3332]: Will run job `cron.monthly' in 78 min.
May 26 14:01:01 master anacron[3332]: Jobs will be executed sequentially
May 27 14:00:34 master crond[1066]: (CRON) STARTUP (1.5.2)
May 27 14:00:34 master crond[1066]: (CRON) INFO (Syslog will be used instead of sendmail.)
May 27 14:00:34 master crond[1066]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 95% if used.)
May 27 14:00:35 master crond[1066]: (CRON) INFO (running with inotify support)
May 27 14:01:01 master CROND[1658]: (root) CMD (run-parts /etc/cron.hourly)
May 27 14:01:01 master run-parts[1658]: (/etc/cron.hourly) starting 0anacron
May 27 14:01:01 master anacron[1667]: Anacron started on 2022-05-27
May 27 14:01:01 master run-parts[1658]: (/etc/cron.hourly) finished 0anacron
May 27 14:01:01 master anacron[1667]: Will run job `cron.daily' in 42 min.
May 27 14:01:01 master anacron[1667]: Will run job `cron.weekly' in 62 min.
May 27 14:01:01 master anacron[1667]: Will run job `cron.monthly' in 82 min.
May 27 14:01:01 master anacron[1667]: Jobs will be executed sequentially
May 27 15:43:44 master crond[1209]: (CRON) STARTUP (1.5.2)
May 27 15:43:44 master crond[1209]: (CRON) INFO (Syslog will be used instead of sendmail.)
May 27 15:43:44 master crond[1209]: (CRON) INFO (RANDOM_DELAY will be scaled with factor 25% if used.)
May 27 15:43:45 master crond[1209]: (CRON) INFO (running with inotify support)
查看系统引导日志
[root@master log]# dmesg
[ 0.306638] pci 0000:00:10.0: BAR 6: assigned [mem 0xc0008000-0xc000bfff pref]
[ 0.306641] pci 0000:00:15.3: BAR 13: no space for [io size 0x1000]
[ 0.306643] pci 0000:00:15.3: BAR 13: failed to assign [io size 0x1000]
[ 0.306645] pci 0000:00:15.4: BAR 13: no space for [io size 0x1000]
[ 0.306647] pci 0000:00:15.4: BAR 13: failed to assign [io size 0x1000]
[ 0.306648] pci 0000:00:15.5: BAR 13: no space for [io size 0x1000]
[ 0.306650] pci 0000:00:15.5: BAR 13: failed to assign [io size 0x1000]
[ 0.306652] pci 0000:00:15.6: BAR 13: no space for [io size 0x1000]
[ 0.306653] pci 0000:00:15.6: BAR 13: failed to assign [io size 0x1000]
[ 0.306655] pci 0000:00:15.7: BAR 13: no space for [io size 0x1000]
[ 0.306656] pci 0000:00:15.7: BAR 13: failed to assign [io size 0x1000]
[ 0.306658] pci 0000:00:16.3: BAR 13: no space for [io size 0x1000]
[ 0.306659] pci 0000:00:16.3: BAR 13: failed to assign [io size 0x1000]
[ 0.306661] pci 0000:00:16.4: BAR 13: no space for [io size 0x1000]
[ 0.306662] pci 0000:00:16.4: BAR 13: failed to assign [io size 0x1000]
[ 0.306664] pci 0000:00:16.5: BAR 13: no space for [io size 0x1000]
[ 0.306665] pci 0000:00:16.5: BAR 13: failed to assign [io size 0x1000]
[ 0.306667] pci 0000:00:16.6: BAR 13: no space for [io size 0x1000]
[ 0.306668] pci 0000:00:16.6: BAR 13: failed to assign [io size 0x1000]
[ 0.306670] pci 0000:00:16.7: BAR 13: no space for [io size 0x1000]
[ 0.306671] pci 0000:00:16.7: BAR 13: failed to assign [io size 0x1000]
[ 0.306673] pci 0000:00:17.3: BAR 13: no space for [io size 0x1000]
[ 0.306674] pci 0000:00:17.3: BAR 13: failed to assign [io size 0x1000]
[ 0.306676] pci 0000:00:17.4: BAR 13: no space for [io size 0x1000]
[ 0.306677] pci 0000:00:17.4: BAR 13: failed to assign [io size 0x1000]
[ 0.306679] pci 0000:00:17.5: BAR 13: no space for [io size 0x1000]
[ 0.306680] pci 0000:00:17.5: BAR 13: failed to assign [io size 0x1000]
[ 0.306682] pci 0000:00:17.6: BAR 13: no space for [io size 0x1000]
[ 0.306683] pci 0000:00:17.6: BAR 13: failed to assign [io size 0x1000]
[ 0.306685] pci 0000:00:17.7: BAR 13: no space for [io size 0x1000]
[ 0.306686] pci 0000:00:17.7: BAR 13: failed to assign [io size 0x1000]
[ 0.306688] pci 0000:00:18.2: BAR 13: no space for [io size 0x1000]
[ 0.306689] pci 0000:00:18.2: BAR 13: failed to assign [io size 0x1000]
[ 0.306691] pci 0000:00:18.3: BAR 13: no space for [io size 0x1000]
[ 0.306692] pci 0000:00:18.3: BAR 13: failed to assign [io size 0x1000]
[ 0.306694] pci 0000:00:18.4: BAR 13: no space for [io size 0x1000]
[ 0.306695] pci 0000:00:18.4: BAR 13: failed to assign [io size 0x1000]
[ 0.306697] pci 0000:00:18.5: BAR 13: no space for [io size 0x1000]
[ 0.306699] pci 0000:00:18.5: BAR 13: failed to assign [io size 0x1000]
[ 0.306700] pci 0000:00:18.6: BAR 13: no space for [io size 0x1000]
[ 0.306702] pci 0000:00:18.6: BAR 13: failed to assign [io size 0x1000]
[ 0.306703] pci 0000:00:18.7: BAR 13: no space for [io size 0x1000]
[ 0.306705] pci 0000:00:18.7: BAR 13: failed to assign [io size 0x1000]
[ 0.306709] pci 0000:00:18.7: BAR 13: no space for [io size 0x1000]
[ 0.306711] pci 0000:00:18.7: BAR 13: failed to assign [io size 0x1000]
[ 0.306712] pci 0000:00:18.6: BAR 13: no space for [io size 0x1000]
[ 0.306714] pci 0000:00:18.6: BAR 13: failed to assign [io size 0x1000]
[ 0.306716] pci 0000:00:18.5: BAR 13: no space for [io size 0x1000]
[ 0.306717] pci 0000:00:18.5: BAR 13: failed to assign [io size 0x1000]
[ 0.306719] pci 0000:00:18.4: BAR 13: no space for [io size 0x1000]
[ 0.306720] pci 0000:00:18.4: BAR 13: failed to assign [io size 0x1000]
[ 0.306722] pci 0000:00:18.3: BAR 13: no space for [io size 0x1000]
[ 0.306723] pci 0000:00:18.3: BAR 13: failed to assign [io size 0x1000]
查看邮件系统日志
该日志文件记录了每一个发送到系统或从系统发出的电子邮件的活动。它可以用来查看 用户使用哪个系统发送工具或把数据发送到哪个系统。可以采用 cat /var/log/maillog 或 者 tail -f /var/log/maillog 查看电子邮件的活动。
查看用户登录日志
这种日志数据用于记录 Linux 操作系统用户登录及退出系统的相关信息,包括用户名、 登录的终端、登录时间、来源主机、正在使用的进程操作等。
以下文件保存了用户登录、退出系统等相关信息
1)/var/log/lastlog :最近的用户登录事件
2)/var/log/wtmp :用户登录注销及系统开、关机事件
3)/var/run/utmp :当前登录的每个用户的详细信息
4)/var/log/secure :与用户验证相关的安全性事件
(1)lastlog 列出所有用户最近登录的信息
lastlog 引用的是/var/log/lastlog 文件中的信息,包括登录名、端口、最后登录时 间等。
[root@master log]# lastlog
Username Port From Latest
root pts/0 Fri May 27 15:46:28 +0800 2022
bin **Never logged in**
daemon **Never logged in**
adm **Never logged in**
lp **Never logged in**
sync **Never logged in**
shutdown **Never logged in**
halt **Never logged in**
mail **Never logged in**
operator **Never logged in**
games **Never logged in**
ftp **Never logged in**
nobody **Never logged in**
dbus **Never logged in**
systemd-coredump **Never logged in**
systemd-resolve **Never logged in**
tss **Never logged in**
polkitd **Never logged in**
unbound **Never logged in**
sssd **Never logged in**
sshd **Never logged in**
hadoop pts/0 Fri May 27 15:46:13 +0800 2022
mysql **Never logged in**
chrony **Never logged in**
(2)last 列出当前和曾经登入系统的用户信息
它默认读取的是/var/log/wtmp 文件的信息。输出的内容包括:用户名、终端位置、登 录源信息、开始时间、结束时间、持续时间。注意最后一行输出的是 wtmp 文件起始记录的 时间。当然也可以通过 last -f 参数指定读取文件,可以是/var/log/btmp、/var/run/utmp 文件。
[root@master log]# last
root pts/0 10.10.10.1 Fri May 27 15:42 still logged in
reboot system boot 4.18.0-373.el8.x Fri May 27 15:41 still running
root tty1 Fri May 27 13:59 - 14:06 (00:07)
reboot system boot 4.18.0-373.el8.x Fri May 27 13:58 - 14:06 (00:07)
root pts/0 10.10.10.1 Thu May 26 13:43 - 14:02 (00:19)
root tty1 Thu May 26 13:42 - 14:02 (00:19)
reboot system boot 4.18.0-373.el8.x Thu May 26 13:41 - 14:02 (00:21)
root pts/0 10.10.10.1 Wed May 25 10:04 - 11:39 (01:35)
root tty1 Wed May 25 10:04 - 11:39 (01:35)
reboot system boot 4.18.0-373.el8.x Wed May 25 10:03 - 11:40 (01:36)
root pts/0 10.10.10.1 Sat Mar 26 11:10 - 11:10 (00:00)
root tty1 Sat Mar 26 11:09 - 11:09 (00:00)
reboot system boot 4.18.0-373.el8.x Sat Mar 26 11:08 - 11:10 (00:01)
wtmp begins Sat Mar 26 11:08:59 2022
切换到 root 用户,使用命令 last -f /var/run/utmp,查看 utmp 文件
[root@master log]# last -f /var/run/utmp
root pts/0 10.10.10.1 Fri May 27 15:42 still logged in
reboot system boot 4.18.0-373.el8.x Fri May 27 15:41 still running
utmp begins Fri May 27 15:41:50 2022
(3)lastb 列出失败尝试的登录信息
lastb 和 last 命令功能完全相同,只不过它默认读取的是/var/log/btmp 文件的信息。
[root@master log]# lastb
root pts/0 Wed May 25 11:10 - 11:10 (00:00)
btmp begins Wed May 25 11:10:52 2022
(4)通过 Linux 系统安全日志文件/var/log/secure 可查看 SSH 登录行为,该文件读 取需要 root 权限。
切换为 root 用户,执行 cat /var/log/secure 命令查看服务器登录行为
[root@master log]# cat /var/log/secure
Mar 26 11:09:12 localhost polkitd[920]: Loading rules from directory /etc/polkit-1/rules.d
Mar 26 11:09:12 localhost polkitd[920]: Loading rules from directory /usr/share/polkit-1/rules.d
Mar 26 11:09:12 localhost polkitd[920]: Finished loading, compiling and executing 2 rules
Mar 26 11:09:12 localhost polkitd[920]: Acquired the name org.freedesktop.PolicyKit1 on the system bus
Mar 26 11:09:16 localhost sshd[1046]: Server listening on 0.0.0.0 port 22.
Mar 26 11:09:16 localhost sshd[1046]: Server listening on :: port 22.
Mar 26 11:09:35 localhost systemd[5719]: pam_unix(systemd-user:session): session opened for user root by (uid=0)
Mar 26 11:09:35 localhost login[1056]: pam_unix(login:session): session opened for user root by LOGIN(uid=0)
Mar 26 11:09:35 localhost login[1056]: ROOT LOGIN ON tty1
Mar 26 11:09:38 localhost login[1056]: pam_unix(login:session): session closed for user root
Mar 26 11:10:44 localhost sshd[5782]: Accepted password for root from 10.10.10.1 port 58134 ssh2
Mar 26 11:10:44 localhost systemd[5788]: pam_unix(systemd-user:session): session opened for user root by (uid=0)
Mar 26 11:10:44 localhost sshd[5782]: pam_unix(sshd:session): session opened for user root by (uid=0)
May 25 10:03:46 localhost polkitd[908]: Loading rules from directory /etc/polkit-1/rules.d
May 25 10:03:46 localhost polkitd[908]: Loading rules from directory /usr/share/polkit-1/rules.d
May 25 10:03:46 localhost polkitd[908]: Finished loading, compiling and executing 2 rules
May 25 10:03:46 localhost polkitd[908]: Acquired the name org.freedesktop.PolicyKit1 on the system bus
May 25 10:03:47 localhost sshd[998]: Server listening on 0.0.0.0 port 22.
May 25 10:03:47 localhost sshd[998]: Server listening on :: port 22.
May 25 10:04:12 localhost systemd[1580]: pam_unix(systemd-user:session): session opened for user root by (uid=0)
May 25 10:04:12 localhost login[1016]: pam_unix(login:session): session opened for user root by LOGIN(uid=0)
May 25 10:04:12 localhost login[1016]: ROOT LOGIN ON tty1
May 25 10:04:56 localhost sshd[1620]: Accepted password for root from 10.10.10.1 port 1244 ssh2
May 25 10:04:56 localhost sshd[1620]: pam_unix(sshd:session): session opened for user root by (uid=0)
May 25 10:08:19 localhost sshd[1667]: Accepted password for root from 10.10.10.1 port 1361 ssh2
May 25 10:08:19 localhost sshd[1667]: pam_unix(sshd:session): session opened for user root by (uid=0)
May 25 10:08:37 localhost sshd[1667]: pam_unix(sshd:session): session closed for user root
May 25 10:16:21 localhost useradd[1946]: new group: name=hadoop, GID=1000
May 25 10:16:21 localhost useradd[1946]: new user: name=hadoop, UID=1000, GID=1000, home=/home/hadoop, shell=/bin/bash
May 25 10:16:21 localhost passwd[1954]: pam_unix(passwd:chauthtok): password changed for hadoop
May 25 10:18:21 localhost su[1970]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:18:21 localhost su[1970]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 10:19:49 localhost sshd[2345]: Connection closed by authenticating user hadoop 10.10.10.128 port 37120 [preauth]
May 25 10:19:49 localhost sshd[2353]: Connection closed by authenticating user hadoop 10.10.10.128 port 37122 [preauth]
May 25 10:19:51 localhost sshd[2364]: Accepted password for hadoop from 10.10.10.128 port 37124 ssh2
May 25 10:19:51 localhost systemd[2370]: pam_unix(systemd-user:session): session opened for user hadoop by (uid=0)
May 25 10:19:51 localhost sshd[2364]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:19:51 localhost sshd[2380]: Received disconnect from 10.10.10.128 port 37124:11: disconnected by user
May 25 10:19:51 localhost sshd[2380]: Disconnected from user hadoop 10.10.10.128 port 37124
May 25 10:19:51 localhost sshd[2364]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:19:56 localhost sshd[2490]: Accepted publickey for hadoop from 10.10.10.128 port 37126 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:19:56 localhost sshd[2490]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:20:01 localhost sshd[2493]: Received disconnect from 10.10.10.128 port 37126:11: disconnected by user
May 25 10:20:01 localhost sshd[2493]: Disconnected from user hadoop 10.10.10.128 port 37126
May 25 10:20:01 localhost sshd[2490]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:20:12 localhost sshd[2628]: Accepted publickey for hadoop from 127.0.0.1 port 40436 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:20:12 localhost systemd[2639]: pam_unix(systemd-user:session): session opened for user hadoop by (uid=0)
May 25 10:20:12 localhost sshd[2628]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:20:17 localhost sshd[2649]: Received disconnect from 127.0.0.1 port 40436:11: disconnected by user
May 25 10:20:17 localhost sshd[2649]: Disconnected from user hadoop 127.0.0.1 port 40436
May 25 10:20:17 localhost sshd[2628]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:20:24 localhost sshd[2845]: Accepted publickey for hadoop from 10.10.10.128 port 37134 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:20:24 localhost sshd[2845]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:20:28 localhost sshd[2848]: Received disconnect from 10.10.10.128 port 37134:11: disconnected by user
May 25 10:20:28 localhost sshd[2848]: Disconnected from user hadoop 10.10.10.128 port 37134
May 25 10:20:28 localhost sshd[2845]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:20:33 localhost sshd[3051]: Accepted publickey for hadoop from 127.0.0.1 port 40444 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:20:33 localhost sshd[3051]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:20:38 localhost sshd[3054]: Received disconnect from 127.0.0.1 port 40444:11: disconnected by user
May 25 10:20:38 localhost sshd[3054]: Disconnected from user hadoop 127.0.0.1 port 40444
May 25 10:20:38 localhost sshd[3051]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:23:45 localhost su[3917]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:23:45 localhost su[3917]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 10:27:02 localhost sshd[12480]: Accepted password for root from 10.10.10.1 port 1538 ssh2
May 25 10:27:02 localhost sshd[12480]: pam_unix(sshd:session): session opened for user root by (uid=0)
May 25 10:27:21 localhost sshd[12480]: pam_unix(sshd:session): session closed for user root
May 25 10:28:14 localhost groupadd[12587]: group added to /etc/group: name=mysql, GID=27
May 25 10:28:14 localhost groupadd[12587]: group added to /etc/gshadow: name=mysql
May 25 10:28:14 localhost groupadd[12587]: new group: name=mysql, GID=27
May 25 10:28:14 localhost useradd[12593]: new user: name=mysql, UID=27, GID=27, home=/var/lib/mysql, shell=/bin/false
May 25 10:33:43 localhost su[15259]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:33:43 localhost su[15259]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 10:38:19 localhost su[15312]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:38:19 localhost su[15312]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 10:38:24 localhost su[15340]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:38:24 localhost su[15340]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 10:38:32 localhost sshd[15464]: Accepted publickey for hadoop from 10.10.10.128 port 37282 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:38:32 localhost sshd[15464]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:38:37 localhost sshd[15471]: Received disconnect from 10.10.10.128 port 37282:11: disconnected by user
May 25 10:38:37 localhost sshd[15471]: Disconnected from user hadoop 10.10.10.128 port 37282
May 25 10:38:37 localhost sshd[15464]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:38:43 localhost sshd[15606]: Accepted publickey for hadoop from 127.0.0.1 port 40608 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:38:43 localhost sshd[15606]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:38:48 localhost sshd[15609]: Received disconnect from 127.0.0.1 port 40608:11: disconnected by user
May 25 10:38:48 localhost sshd[15609]: Disconnected from user hadoop 127.0.0.1 port 40608
May 25 10:38:48 localhost sshd[15606]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:39:05 localhost sshd[15926]: Accepted publickey for hadoop from 10.10.10.128 port 37310 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:39:05 localhost systemd[15931]: pam_unix(systemd-user:session): session opened for user hadoop by (uid=0)
May 25 10:39:05 localhost sshd[15926]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:39:10 localhost sshd[15941]: Received disconnect from 10.10.10.128 port 37310:11: disconnected by user
May 25 10:39:10 localhost sshd[15941]: Disconnected from user hadoop 10.10.10.128 port 37310
May 25 10:39:10 localhost sshd[15926]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:39:15 localhost sshd[16145]: Accepted publickey for hadoop from 127.0.0.1 port 40620 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:39:15 localhost sshd[16145]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:39:19 localhost sshd[16148]: Received disconnect from 127.0.0.1 port 40620:11: disconnected by user
May 25 10:39:19 localhost sshd[16148]: Disconnected from user hadoop 127.0.0.1 port 40620
May 25 10:39:19 localhost sshd[16145]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:40:52 localhost su[16825]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:40:52 localhost su[16825]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 10:40:57 localhost groupadd[16862]: group added to /etc/group: name=chrony, GID=992
May 25 10:40:57 localhost groupadd[16862]: group added to /etc/gshadow: name=chrony
May 25 10:40:57 localhost groupadd[16862]: new group: name=chrony, GID=992
May 25 10:40:57 localhost useradd[16870]: new user: name=chrony, UID=995, GID=992, home=/var/lib/chrony, shell=/sbin/nologin
May 25 10:45:34 localhost su[17268]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:45:34 localhost su[17268]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 10:48:03 localhost su[17507]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:48:03 localhost su[17507]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 10:51:42 localhost su[17604]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:51:42 localhost su[17604]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 10:51:46 localhost sshd[17729]: Accepted publickey for hadoop from 10.10.10.128 port 37700 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:51:46 localhost sshd[17729]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:51:46 localhost sshd[17732]: Received disconnect from 10.10.10.128 port 37700:11: disconnected by user
May 25 10:51:46 localhost sshd[17732]: Disconnected from user hadoop 10.10.10.128 port 37700
May 25 10:51:46 localhost sshd[17729]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:51:48 localhost sshd[17862]: Accepted publickey for hadoop from 127.0.0.1 port 41012 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 10:51:48 localhost sshd[17862]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 10:51:48 localhost sshd[17865]: Received disconnect from 127.0.0.1 port 41012:11: disconnected by user
May 25 10:51:48 localhost sshd[17865]: Disconnected from user hadoop 127.0.0.1 port 41012
May 25 10:51:48 localhost sshd[17862]: pam_unix(sshd:session): session closed for user hadoop
May 25 10:55:34 localhost su[18482]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 10:55:34 localhost su[18482]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:02:30 localhost su[18569]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:02:30 localhost su[18569]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:02:37 localhost sshd[18692]: Accepted publickey for hadoop from 10.10.10.128 port 38066 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:02:37 localhost sshd[18692]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:02:37 localhost sshd[18695]: Received disconnect from 10.10.10.128 port 38066:11: disconnected by user
May 25 11:02:37 localhost sshd[18695]: Disconnected from user hadoop 10.10.10.128 port 38066
May 25 11:02:37 localhost sshd[18692]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:02:38 localhost sshd[18826]: Accepted publickey for hadoop from 127.0.0.1 port 41376 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:02:38 localhost sshd[18826]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:02:38 localhost sshd[18829]: Received disconnect from 127.0.0.1 port 41376:11: disconnected by user
May 25 11:02:38 localhost sshd[18829]: Disconnected from user hadoop 127.0.0.1 port 41376
May 25 11:02:38 localhost sshd[18826]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:02:44 localhost sshd[19204]: Accepted publickey for hadoop from 10.10.10.128 port 38080 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:02:44 localhost sshd[19204]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:02:45 localhost sshd[19207]: Received disconnect from 10.10.10.128 port 38080:11: disconnected by user
May 25 11:02:45 localhost sshd[19207]: Disconnected from user hadoop 10.10.10.128 port 38080
May 25 11:02:45 localhost sshd[19204]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:04:08 localhost su[19516]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:04:08 localhost su[19516]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:09:18 localhost su[19573]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:09:18 localhost su[19573]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:10:04 localhost sshd[19703]: Accepted publickey for hadoop from 10.10.10.128 port 38378 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:10:04 localhost sshd[19703]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:10:04 localhost sshd[19706]: Received disconnect from 10.10.10.128 port 38378:11: disconnected by user
May 25 11:10:04 localhost sshd[19706]: Disconnected from user hadoop 10.10.10.128 port 38378
May 25 11:10:04 localhost sshd[19703]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:10:08 localhost sshd[19836]: Accepted publickey for hadoop from 127.0.0.1 port 41688 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:10:08 localhost sshd[19836]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:10:08 localhost sshd[19839]: Received disconnect from 127.0.0.1 port 41688:11: disconnected by user
May 25 11:10:08 localhost sshd[19839]: Disconnected from user hadoop 127.0.0.1 port 41688
May 25 11:10:08 localhost sshd[19836]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:10:16 localhost sshd[20215]: Accepted publickey for hadoop from 10.10.10.128 port 38396 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:10:16 localhost sshd[20215]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:10:17 localhost sshd[20218]: Received disconnect from 10.10.10.128 port 38396:11: disconnected by user
May 25 11:10:17 localhost sshd[20218]: Disconnected from user hadoop 10.10.10.128 port 38396
May 25 11:10:17 localhost sshd[20215]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:10:50 localhost unix_chkpwd[20518]: password check failed for user (root)
May 25 11:10:50 localhost su[20516]: pam_unix(su-l:auth): authentication failure; logname=root uid=1000 euid=0 tty=pts/0 ruser=root rhost= user=root
May 25 11:10:57 localhost su[20519]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:10:57 localhost su[20519]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:11:14 localhost su[20548]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:11:14 localhost su[20548]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:11:27 localhost sshd[20858]: Accepted publickey for hadoop from 10.10.10.128 port 38494 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:11:27 localhost sshd[20858]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:11:27 localhost sshd[20861]: Received disconnect from 10.10.10.128 port 38494:11: disconnected by user
May 25 11:11:27 localhost sshd[20861]: Disconnected from user hadoop 10.10.10.128 port 38494
May 25 11:11:27 localhost sshd[20858]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:11:35 localhost sshd[21079]: Accepted publickey for hadoop from 10.10.10.128 port 38518 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:11:35 localhost sshd[21079]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:11:36 localhost sshd[21082]: Received disconnect from 10.10.10.128 port 38518:11: disconnected by user
May 25 11:11:36 localhost sshd[21082]: Disconnected from user hadoop 10.10.10.128 port 38518
May 25 11:11:36 localhost sshd[21079]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:11:50 localhost sshd[21473]: Accepted publickey for hadoop from 10.10.10.128 port 38598 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:11:50 localhost sshd[21473]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:11:55 localhost sshd[21476]: Received disconnect from 10.10.10.128 port 38598:11: disconnected by user
May 25 11:11:55 localhost sshd[21476]: Disconnected from user hadoop 10.10.10.128 port 38598
May 25 11:11:55 localhost sshd[21473]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:12:01 localhost sshd[21616]: Accepted publickey for hadoop from 127.0.0.1 port 41960 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:12:01 localhost sshd[21616]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:12:06 localhost sshd[21619]: Received disconnect from 127.0.0.1 port 41960:11: disconnected by user
May 25 11:12:06 localhost sshd[21619]: Disconnected from user hadoop 127.0.0.1 port 41960
May 25 11:12:06 localhost sshd[21616]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:12:32 localhost sshd[21938]: Accepted publickey for hadoop from 10.10.10.128 port 38680 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:12:32 localhost systemd[21943]: pam_unix(systemd-user:session): session opened for user hadoop by (uid=0)
May 25 11:12:32 localhost sshd[21938]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:12:36 localhost sshd[21952]: Received disconnect from 10.10.10.128 port 38680:11: disconnected by user
May 25 11:12:36 localhost sshd[21952]: Disconnected from user hadoop 10.10.10.128 port 38680
May 25 11:12:36 localhost sshd[21938]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:12:41 localhost sshd[22157]: Accepted publickey for hadoop from 127.0.0.1 port 42000 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:12:41 localhost sshd[22157]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:12:46 localhost sshd[22160]: Received disconnect from 127.0.0.1 port 42000:11: disconnected by user
May 25 11:12:46 localhost sshd[22160]: Disconnected from user hadoop 127.0.0.1 port 42000
May 25 11:12:46 localhost sshd[22157]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:12:57 localhost sshd[22810]: Accepted publickey for hadoop from 10.10.10.128 port 38704 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:12:57 localhost sshd[22810]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:12:58 localhost sshd[22813]: Received disconnect from 10.10.10.128 port 38704:11: disconnected by user
May 25 11:12:58 localhost sshd[22813]: Disconnected from user hadoop 10.10.10.128 port 38704
May 25 11:12:58 localhost sshd[22810]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:17:44 localhost su[23236]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:17:44 localhost su[23236]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:19:23 localhost su[23277]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:19:23 localhost su[23277]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:19:28 localhost sshd[23400]: Accepted publickey for hadoop from 10.10.10.128 port 38828 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:19:28 localhost sshd[23400]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:19:28 localhost sshd[23403]: Received disconnect from 10.10.10.128 port 38828:11: disconnected by user
May 25 11:19:28 localhost sshd[23403]: Disconnected from user hadoop 10.10.10.128 port 38828
May 25 11:19:28 localhost sshd[23400]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:19:29 localhost sshd[23534]: Accepted publickey for hadoop from 127.0.0.1 port 42138 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:19:29 localhost sshd[23534]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:19:29 localhost sshd[23537]: Received disconnect from 127.0.0.1 port 42138:11: disconnected by user
May 25 11:19:29 localhost sshd[23537]: Disconnected from user hadoop 127.0.0.1 port 42138
May 25 11:19:29 localhost sshd[23534]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:22:13 localhost su[24050]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:22:13 localhost su[24050]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:23:01 localhost su[24085]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:23:01 localhost su[24085]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:24:26 localhost sshd[24213]: Accepted publickey for hadoop from 10.10.10.128 port 38872 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:24:26 localhost sshd[24213]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:24:27 localhost sshd[24216]: Received disconnect from 10.10.10.128 port 38872:11: disconnected by user
May 25 11:24:27 localhost sshd[24216]: Disconnected from user hadoop 10.10.10.128 port 38872
May 25 11:24:27 localhost sshd[24213]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:24:28 localhost sshd[24346]: Accepted publickey for hadoop from 127.0.0.1 port 42182 ssh2: RSA SHA256:aWRVRxVan20w61fcneArrM2NNgE3+DmuBZgP/eobJoQ
May 25 11:24:28 localhost sshd[24346]: pam_unix(sshd:session): session opened for user hadoop by (uid=0)
May 25 11:24:28 localhost sshd[24349]: Received disconnect from 127.0.0.1 port 42182:11: disconnected by user
May 25 11:24:28 localhost sshd[24349]: Disconnected from user hadoop 127.0.0.1 port 42182
May 25 11:24:28 localhost sshd[24346]: pam_unix(sshd:session): session closed for user hadoop
May 25 11:28:05 localhost su[27527]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:28:05 localhost su[27527]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
May 25 11:39:02 localhost su[27597]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:39:02 localhost su[27597]: pam_unix(su-l:session): session opened for user hadoop by root(uid=0)
May 25 11:39:55 localhost su[27721]: pam_systemd(su-l:session): Cannot create session: Already running in a session or user slice
May 25 11:39:55 localhost su[27721]: pam_unix(su-l:session): session opened for user root by root(uid=1000)
在Hadoop MapReduce Jobs中查看日志信息
在浏览器地址栏中输入 http://master:19888/jobhistory,将显示关于作业的摘要信息。
**注意:需先启动 jobhistory 进程**
[hadoop@master ~]$ cd /usr/local/src/hadoop/sbin/
[hadoop@master sbin]$ ./mr-jobhistory-daemon.sh start historyserver
starting historyserver, logging to /usr/local/src/hadoop/logs/mapred-hadoop-historyserver-master.example.com.out
通过用户界面查看Hadoop日志
默认情况下,可以通过以下 URL 访问日志,http://master:19888
默认资源管理器 Web 界面可通过以下 URL 访问: http://master:8088
我们也可以通过 Hadoop 的用户界面查看日志信息,使用浏览器访问 http://master:50070,点击 Utilities-->Logs
通过命令查看Hadoop日志
[hadoop@master ~]$ cd /usr/local/src/hadoop/logs/
[hadoop@master logs]$ ll
total 808
-rw-rw-r--. 1 hadoop hadoop 341440 May 27 16:02 hadoop-hadoop-namenode-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 716 May 27 15:56 hadoop-hadoop-namenode-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 4960 May 26 13:49 hadoop-hadoop-namenode-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:39 hadoop-hadoop-namenode-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:20 hadoop-hadoop-namenode-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:18 hadoop-hadoop-namenode-master.example.com.out.3.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 11:12 hadoop-hadoop-namenode-master.example.com.out.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 41275 May 27 15:57 hadoop-hadoop-secondarynamenode-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 94961 May 25 11:39 hadoop-hadoop-secondarynamenode-master.example.com.log.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 27 15:56 hadoop-hadoop-secondarynamenode-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 716 May 26 13:43 hadoop-hadoop-secondarynamenode-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:39 hadoop-hadoop-secondarynamenode-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:20 hadoop-hadoop-secondarynamenode-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:18 hadoop-hadoop-secondarynamenode-master.example.com.out.3.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 11:12 hadoop-hadoop-secondarynamenode-master.example.com.out.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 70395 May 27 16:03 mapred-hadoop-historyserver-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 2031 May 27 15:57 mapred-hadoop-historyserver-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 0 May 26 13:46 mapred-hadoop-historyserver-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 0 May 25 11:39 mapred-hadoop-historyserver-master.example.com.out.2
-rw-rw-r--. 1 hadoop hadoop 0 May 26 13:43 SecurityAuth-hadoop.audit
-rw-rw-r--. 1 hadoop hadoop 0 May 25 10:18 SecurityAuth-hadoop.audit.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 187718 May 27 15:57 yarn-hadoop-resourcemanager-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 2078 May 27 16:01 yarn-hadoop-resourcemanager-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 2078 May 26 13:44 yarn-hadoop-resourcemanager-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 700 May 25 10:39 yarn-hadoop-resourcemanager-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 700 May 25 10:20 yarn-hadoop-resourcemanager-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 2086 May 25 11:31 yarn-hadoop-resourcemanager-master.example.com.out.COMPLETED
查看HBase日志
HBase 提供了 Web 用户界面对日志文件的查看,使用浏览器访问 http://master:60010,显示 HBase 的 Web 主界面
点击“Local Logs”菜单打开 HBase 的日志列表
点击其中一条链接来访问相应的日志信息
查看Hive日志
[root@master ~]# cd /tmp/hadoop
[root@master hadoop]# ll
total 24
-rw-rw-r--. 1 hadoop hadoop 19376 May 25 11:21 hive.log
-rw-rw-r--. 1 hadoop hadoop 1259 May 25 11:21 stderr
使用 cat 命令查看 hive.log 日志文件
[root@master hadoop]# cat hive.log
2022-05-25T10:39:45,121 INFO [main]: SessionState (SessionState.java:printInfo(1007)) -
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
2022-05-25T10:39:45,412 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:newRawStore(499)) - 0: Opening raw store with implementation class:org.apache.hadoop.hive.metastore.ObjectStore
2022-05-25T10:39:45,467 INFO [main]: metastore.ObjectStore (ObjectStore.java:initialize(318)) - ObjectStore, initialize called
2022-05-25T10:39:46,349 INFO [main]: metastore.ObjectStore (ObjectStore.java:getPMF(402)) - Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes="Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order"
2022-05-25T10:39:47,758 INFO [main]: metastore.MetaStoreDirectSql (MetaStoreDirectSql.java:(142)) - Using direct SQL, underlying DB is MYSQL
2022-05-25T10:39:47,761 INFO [main]: metastore.ObjectStore (ObjectStore.java:setConf(301)) - Initialized ObjectStore
2022-05-25T10:39:47,967 WARN [main]: metastore.ObjectStore (ObjectStore.java:getDatabase(604)) - Failed to get database default, returning NoSuchObjectException
2022-05-25T10:39:48,822 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:createDefaultRoles_core(586)) - Added admin role in metastore
2022-05-25T10:39:48,828 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:createDefaultRoles_core(595)) - Added public role in metastore
2022-05-25T10:39:48,879 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:addAdminUsers_core(635)) - No user is added in admin role, since config is empty
2022-05-25T10:39:48,968 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_all_functions
2022-05-25T10:39:48,969 INFO [main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_all_functions
2022-05-25T10:39:49,440 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created HDFS directory: /tmp/hive/hadoop
2022-05-25T10:39:49,445 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created HDFS directory: /tmp/hive/hadoop/718a9582-727b-42cf-8315-3d4be6b5dd89
2022-05-25T10:39:49,476 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created local directory: /usr/local/src/hive/tmp/718a9582-727b-42cf-8315-3d4be6b5dd89
2022-05-25T10:39:49,481 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created HDFS directory: /tmp/hive/hadoop/718a9582-727b-42cf-8315-3d4be6b5dd89/_tmp_space.db
2022-05-25T10:39:49,492 INFO [main]: conf.HiveConf (HiveConf.java:getLogIdVar(3174)) - Using the default value passed in for log id: 718a9582-727b-42cf-8315-3d4be6b5dd89
2022-05-25T10:39:49,493 INFO [718a9582-727b-42cf-8315-3d4be6b5dd89 main]: CliDriver (SessionState.java:printInfo(1007)) - Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
2022-05-25T11:21:08,555 INFO [main]: SessionState (SessionState.java:printInfo(1007)) -
Logging initialized using configuration in jar:file:/usr/local/src/hive/lib/hive-common-2.0.0.jar!/hive-log4j2.properties
2022-05-25T11:21:08,904 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:newRawStore(499)) - 0: Opening raw store with implementation class:org.apache.hadoop.hive.metastore.ObjectStore
2022-05-25T11:21:08,970 INFO [main]: metastore.ObjectStore (ObjectStore.java:initialize(318)) - ObjectStore, initialize called
2022-05-25T11:21:09,984 INFO [main]: metastore.ObjectStore (ObjectStore.java:getPMF(402)) - Setting MetaStore object pin classes with hive.metastore.cache.pinobjtypes="Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order"
2022-05-25T11:21:11,568 INFO [main]: metastore.MetaStoreDirectSql (MetaStoreDirectSql.java:(142)) - Using direct SQL, underlying DB is MYSQL
2022-05-25T11:21:11,573 INFO [main]: metastore.ObjectStore (ObjectStore.java:setConf(301)) - Initialized ObjectStore
2022-05-25T11:21:11,836 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:createDefaultRoles_core(586)) - Added admin role in metastore
2022-05-25T11:21:11,840 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:createDefaultRoles_core(595)) - Added public role in metastore
2022-05-25T11:21:11,912 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:addAdminUsers_core(635)) - No user is added in admin role, since config is empty
2022-05-25T11:21:12,055 INFO [main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_all_functions
2022-05-25T11:21:12,057 INFO [main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_all_functions
2022-05-25T11:21:13,300 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created HDFS directory: /tmp/hive/hadoop/98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:13,337 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created local directory: /usr/local/src/hive/tmp/98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:13,346 INFO [main]: session.SessionState (SessionState.java:createPath(684)) - Created HDFS directory: /tmp/hive/hadoop/98d03a6e-9cce-4723-9586-4998cd0bb410/_tmp_space.db
2022-05-25T11:21:13,357 INFO [main]: conf.HiveConf (HiveConf.java:getLogIdVar(3174)) - Using the default value passed in for log id: 98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:13,358 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 main]: CliDriver (SessionState.java:printInfo(1007)) - Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. Consider using a different execution engine (i.e. spark, tez) or using Hive 1.X releases.
2022-05-25T11:21:19,852 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 main]: conf.HiveConf (HiveConf.java:getLogIdVar(3174)) - Using the default value passed in for log id: 98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:19,892 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(415)) - Compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c): use sample
2022-05-25T11:21:20,380 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_database: sample
2022-05-25T11:21:20,380 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_database: sample
2022-05-25T11:21:20,389 WARN [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.ObjectStore (ObjectStore.java:getDatabase(604)) - Failed to get database sample, returning NoSuchObjectException
2022-05-25T11:21:20,400 ERROR [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (SessionState.java:printError(1016)) - FAILED: SemanticException [Error 10072]: Database does not exist: sample
org.apache.hadoop.hive.ql.parse.SemanticException: Database does not exist: sample
at org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.getDatabase(BaseSemanticAnalyzer.java:1401)
at org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer.analyzeSwitchDatabase(DDLSemanticAnalyzer.java:834)
at org.apache.hadoop.hive.ql.parse.DDLSemanticAnalyzer.analyzeInternal(DDLSemanticAnalyzer.java:450)
at org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.analyze(BaseSemanticAnalyzer.java:239)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:479)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:319)
at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1255)
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1301)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1184)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1172)
at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:233)
at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:184)
at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:400)
at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:778)
at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:717)
at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:645)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
2022-05-25T11:21:20,400 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(557)) - Completed compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c); Time taken: 0.534 seconds
2022-05-25T11:21:35,906 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 main]: conf.HiveConf (HiveConf.java:getLogIdVar(3174)) - Using the default value passed in for log id: 98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:35,907 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(415)) - Compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c): show tables
2022-05-25T11:21:35,915 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_database: default
2022-05-25T11:21:35,915 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_database: default
2022-05-25T11:21:35,931 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(485)) - Semantic Analysis Completed
2022-05-25T11:21:36,006 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:getSchema(251)) - Returning Hive schema: Schema(fieldSchemas:[FieldSchema(name:tab_name, type:string, comment:from deserializer)], properties:null)
2022-05-25T11:21:36,059 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: exec.ListSinkOperator (Operator.java:initialize(323)) - Initializing operator OP[0]
2022-05-25T11:21:36,066 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(557)) - Completed compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c); Time taken: 0.158 seconds
2022-05-25T11:21:36,066 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:checkConcurrency(171)) - Concurrency mode is disabled, not creating a lock manager
2022-05-25T11:21:36,071 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:execute(1499)) - Executing command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c): show tables
2022-05-25T11:21:36,075 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:launchTask(1838)) - Starting task [Stage-0:DDL] in serial mode
2022-05-25T11:21:36,076 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_database: default
2022-05-25T11:21:36,076 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_database: default
2022-05-25T11:21:36,080 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_tables: db=default pat=.*
2022-05-25T11:21:36,081 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_tables: db=default pat=.*
2022-05-25T11:21:36,112 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:execute(1747)) - Completed executing command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c); Time taken: 0.041 seconds
2022-05-25T11:21:36,112 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (SessionState.java:printInfo(1007)) - OK
2022-05-25T11:21:36,125 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: CliDriver (SessionState.java:printInfo(1007)) - Time taken: 0.205 seconds
2022-05-25T11:21:44,667 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 main]: conf.HiveConf (HiveConf.java:getLogIdVar(3174)) - Using the default value passed in for log id: 98d03a6e-9cce-4723-9586-4998cd0bb410
2022-05-25T11:21:44,668 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(415)) - Compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c): select * from student
2022-05-25T11:21:44,704 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: parse.CalcitePlanner (SemanticAnalyzer.java:analyzeInternal(10092)) - Starting Semantic Analysis
2022-05-25T11:21:44,705 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: parse.CalcitePlanner (SemanticAnalyzer.java:genResolvedParseTree(10039)) - Completed phase 1 of Semantic Analysis
2022-05-25T11:21:44,706 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: parse.CalcitePlanner (SemanticAnalyzer.java:getMetaData(1561)) - Get metadata for source tables
2022-05-25T11:21:44,706 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: metastore.HiveMetaStore (HiveMetaStore.java:logInfo(669)) - 0: get_table : db=default tbl=student
2022-05-25T11:21:44,706 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: HiveMetaStore.audit (HiveMetaStore.java:logAuditEvent(280)) - ugi=hadoop ip=unknown-ip-addr cmd=get_table : db=default tbl=student
2022-05-25T11:21:44,711 ERROR [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: parse.CalcitePlanner (SemanticAnalyzer.java:getMetaData(1860)) - org.apache.hadoop.hive.ql.parse.SemanticException: Line 1:14 Table not found 'student'
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.getMetaData(SemanticAnalyzer.java:1604)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.getMetaData(SemanticAnalyzer.java:1554)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genResolvedParseTree(SemanticAnalyzer.java:10042)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.analyzeInternal(SemanticAnalyzer.java:10093)
at org.apache.hadoop.hive.ql.parse.CalcitePlanner.analyzeInternal(CalcitePlanner.java:229)
at org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.analyze(BaseSemanticAnalyzer.java:239)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:479)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:319)
at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1255)
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1301)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1184)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1172)
at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:233)
at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:184)
at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:400)
at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:778)
at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:717)
at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:645)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
2022-05-25T11:21:44,712 ERROR [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (SessionState.java:printError(1016)) - FAILED: SemanticException [Error 10001]: Line 1:14 Table not found 'student'
org.apache.hadoop.hive.ql.parse.SemanticException: Line 1:14 Table not found 'student'
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.getMetaData(SemanticAnalyzer.java:1861)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.getMetaData(SemanticAnalyzer.java:1554)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.genResolvedParseTree(SemanticAnalyzer.java:10042)
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.analyzeInternal(SemanticAnalyzer.java:10093)
at org.apache.hadoop.hive.ql.parse.CalcitePlanner.analyzeInternal(CalcitePlanner.java:229)
at org.apache.hadoop.hive.ql.parse.BaseSemanticAnalyzer.analyze(BaseSemanticAnalyzer.java:239)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:479)
at org.apache.hadoop.hive.ql.Driver.compile(Driver.java:319)
at org.apache.hadoop.hive.ql.Driver.compileInternal(Driver.java:1255)
at org.apache.hadoop.hive.ql.Driver.runInternal(Driver.java:1301)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1184)
at org.apache.hadoop.hive.ql.Driver.run(Driver.java:1172)
at org.apache.hadoop.hive.cli.CliDriver.processLocalCmd(CliDriver.java:233)
at org.apache.hadoop.hive.cli.CliDriver.processCmd(CliDriver.java:184)
at org.apache.hadoop.hive.cli.CliDriver.processLine(CliDriver.java:400)
at org.apache.hadoop.hive.cli.CliDriver.executeDriver(CliDriver.java:778)
at org.apache.hadoop.hive.cli.CliDriver.run(CliDriver.java:717)
at org.apache.hadoop.hive.cli.CliDriver.main(CliDriver.java:645)
at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method)
at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:62)
at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43)
at java.lang.reflect.Method.invoke(Method.java:498)
at org.apache.hadoop.util.RunJar.run(RunJar.java:221)
at org.apache.hadoop.util.RunJar.main(RunJar.java:136)
Caused by: org.apache.hadoop.hive.ql.parse.SemanticException: Line 1:14 Table not found 'student'
at org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.getMetaData(SemanticAnalyzer.java:1604)
... 23 more
2022-05-25T11:21:44,712 INFO [98d03a6e-9cce-4723-9586-4998cd0bb410 98d03a6e-9cce-4723-9586-4998cd0bb410 main]: ql.Driver (Driver.java:compile(557)) - Completed compiling command(queryId=hadoop_20220525112119_7ebdbf10-f40c-4496-9406-ab567c9cec8c); Time taken: 0.045 seconds
查看大数据平台告警信息
查看大数据平台主机告警信息
#查询系统错误告警信息
[root@master ~]# journalctl -p err..alert
-- Logs begin at Fri 2022-05-27 15:41:45 CST, end at Fri 2022-05>
May 27 15:41:46 localhost.localdomain kernel: Warning: Unmaintai>
May 27 15:41:46 localhost.localdomain kernel: sd 2:0:0:0: [sda] >
May 27 15:41:59 master.example.com kernel: piix4_smbus 0000:00:0>
查看Hadoop告警信息
[root@master ~]# cd /usr/local/src/hadoop/logs/
[root@master logs]# ll
total 808
-rw-rw-r--. 1 hadoop hadoop 342316 May 27 16:06 hadoop-hadoop-namenode-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 716 May 27 15:56 hadoop-hadoop-namenode-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 4960 May 26 13:49 hadoop-hadoop-namenode-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:39 hadoop-hadoop-namenode-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:20 hadoop-hadoop-namenode-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:18 hadoop-hadoop-namenode-master.example.com.out.3.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 11:12 hadoop-hadoop-namenode-master.example.com.out.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 41275 May 27 15:57 hadoop-hadoop-secondarynamenode-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 94961 May 25 11:39 hadoop-hadoop-secondarynamenode-master.example.com.log.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 27 15:56 hadoop-hadoop-secondarynamenode-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 716 May 26 13:43 hadoop-hadoop-secondarynamenode-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:39 hadoop-hadoop-secondarynamenode-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:20 hadoop-hadoop-secondarynamenode-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 10:18 hadoop-hadoop-secondarynamenode-master.example.com.out.3.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 716 May 25 11:12 hadoop-hadoop-secondarynamenode-master.example.com.out.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 70637 May 27 16:09 mapred-hadoop-historyserver-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 2031 May 27 15:57 mapred-hadoop-historyserver-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 0 May 26 13:46 mapred-hadoop-historyserver-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 0 May 25 11:39 mapred-hadoop-historyserver-master.example.com.out.2
-rw-rw-r--. 1 hadoop hadoop 0 May 26 13:43 SecurityAuth-hadoop.audit
-rw-rw-r--. 1 hadoop hadoop 0 May 25 10:18 SecurityAuth-hadoop.audit.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 187718 May 27 15:57 yarn-hadoop-resourcemanager-master.example.com.log
-rw-rw-r--. 1 hadoop hadoop 2078 May 27 16:01 yarn-hadoop-resourcemanager-master.example.com.out
-rw-rw-r--. 1 hadoop hadoop 2078 May 26 13:44 yarn-hadoop-resourcemanager-master.example.com.out.1
-rw-rw-r--. 1 hadoop hadoop 700 May 25 10:39 yarn-hadoop-resourcemanager-master.example.com.out.1.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 700 May 25 10:20 yarn-hadoop-resourcemanager-master.example.com.out.2.COMPLETED
-rw-rw-r--. 1 hadoop hadoop 2086 May 25 11:31 yarn-hadoop-resourcemanager-master.example.com.out.COMPLETED
我们通过查看某个日志文件中包含告警信息的行,然后将这些行显示出来,如查询 ResourceManager 日志最新 1000 行且包含“INFO”关键字的告警信息(grep 默认区分大小写,日志中的级别为大写 INFO)
[root@master logs]# tail -1000f yarn-hadoop-resourcemanager-master.example.com.log | grep INFO
2022-05-25 11:12:08,287 INFO org.apache.hadoop.ipc.Server: Stopping server on 8032
2022-05-25 11:12:08,303 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server listener on 8032
2022-05-25 11:12:08,305 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server Responder
2022-05-25 11:12:08,306 INFO org.apache.hadoop.ipc.Server: Stopping server on 8033
2022-05-25 11:12:08,310 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server listener on 8033
2022-05-25 11:12:08,312 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server Responder
2022-05-25 11:12:08,312 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Transitioning to standby state
2022-05-25 11:12:08,318 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: Stopping ResourceManager metrics system...
2022-05-25 11:12:08,333 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: ResourceManager metrics system stopped.
2022-05-25 11:12:08,335 INFO org.apache.hadoop.metrics2.impl.MetricsSystemImpl: ResourceManager metrics system shutdown complete.
2022-05-25 11:12:08,336 INFO org.apache.hadoop.yarn.event.AsyncDispatcher: AsyncDispatcher is draining to stop, igonring any new events.
2022-05-25 11:12:08,343 INFO org.apache.hadoop.ipc.Server: Stopping server on 8030
2022-05-25 11:12:08,361 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server listener on 8030
2022-05-25 11:12:08,362 INFO org.apache.hadoop.ipc.Server: Stopping server on 8031
2022-05-25 11:12:08,364 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server Responder
2022-05-25 11:12:08,367 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server listener on 8031
2022-05-25 11:12:08,372 INFO org.apache.hadoop.yarn.util.AbstractLivelinessMonitor: NMLivelinessMonitor thread interrupted
2022-05-25 11:12:08,372 INFO org.apache.hadoop.ipc.Server: Stopping IPC Server Responder
2022-05-25 11:12:08,380 INFO org.apache.hadoop.yarn.event.AsyncDispatcher: AsyncDispatcher is draining to stop, igonring any new events.
2022-05-25 11:12:08,385 INFO org.apache.hadoop.yarn.util.AbstractLivelinessMonitor: org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer thread interrupted
2022-05-25 11:12:08,385 INFO org.apache.hadoop.yarn.util.AbstractLivelinessMonitor: AMLivelinessMonitor thread interrupted
2022-05-25 11:12:08,385 INFO org.apache.hadoop.yarn.util.AbstractLivelinessMonitor: AMLivelinessMonitor thread interrupted
2022-05-25 11:12:08,395 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: Transitioned to standby state
2022-05-25 11:12:08,398 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: SHUTDOWN_MSG:
2022-05-25 11:12:47,993 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: STARTUP_MSG:
2022-05-25 11:12:48,000 INFO org.apache.hadoop.yarn.server.resourcemanager.ResourceManager: registered UNIX signal handlers for [TERM, HUP, INT]
2022-05-25 11:12:48,216 INFO org.apache.hadoop.conf.Configuration: found resource core-site.xml at file:/usr/local/src/hadoop/etc/hadoop/core-site.xml
2022-05-25 11:12:48,272 INFO org.apache.hadoop.security.Groups: clearing userToGroupsMap cache
2022-05-25 11:12:48,320 INFO org.apache.hadoop.conf.Configuration: found resource yarn-site.xml at file:/usr/local/src/hadoop/etc/hadoop/yarn-site.xml
查看HBase告警信息
变更日志告警级别
在 HBase 的 Web 用户界面提供了日志告警级别的查询和设置功能。在浏览器中访问 http://master:60010/logLevel 页面
若要查询某个日志的告警级别,输入该日志名,点击“Get Log Level”按钮,显示该 日志的告警级别
如果要将该日志告警级别调整为 WARN,则在第二个框中输入 Log:hbase-hadoop-master-master.log,Level:WARN,点击“Set Log Level”按钮
查询日志告警信息
查看 hbase-hadoop-master-master.example.com.log 文件的“INFO”告警信息
[root@master logs]# cd /usr/local/src/hbase/logs/
[root@master logs]# tail -100f hbase-hadoop-master-master.example.com.log | grep INFO
2022-05-27 16:16:40,160 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 598085 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:41,686 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 599611 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:43,212 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 601137 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:44,738 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 602663 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:46,264 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 604189 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:47,790 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 605715 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:49,315 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 607240 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:50,841 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 608766 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:52,366 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 610291 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:53,892 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 611817 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:55,418 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 613343 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:56,944 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 614869 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:58,469 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 616394 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:16:59,995 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 617920 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
查看 hbase-hadoop-master-master.log 文件的“WARN”级别告警信息
[root@master hadoop]# cd /usr/local/src/hbase/logs
[root@master logs]# tail -100f hbase-hadoop-master-master.log |grep WARN
查看Hive告警信息
[root@master ~]# cd /tmp/hadoop
[root@master hadoop]# tail -1000f hive.log |grep INFO
2022-05-27 16:21:10,255 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 868179 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:11,780 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 869705 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:13,306 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 871231 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:14,831 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 872756 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:16,357 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 874282 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:17,882 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 875807 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:19,408 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 877333 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:20,934 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 878859 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:22,460 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 880385 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:23,985 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 881910 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:25,510 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 883435 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:27,036 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 884961 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:28,562 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 886487 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:30,088 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 888013 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:31,614 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 889539 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:33,140 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 891065 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:34,666 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 892591 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:36,192 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 894117 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:37,722 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 895647 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:39,248 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 897173 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.
2022-05-27 16:21:40,774 INFO [master:16000.activeMasterManager] master.ServerManager: Waiting for region servers count to settle; currently checked in 0, slept for 898699 ms, expecting minimum of 1, maximum of 2147483647, timeout of 4500 ms, interval of 1500 ms.