MongoDB 12-Node High-Availability Sharded Cluster Installation (Version 3.2)
Host | MongoDB01 | MongoDB02 | MongoDB03 | MongoDB04 | MongoDB05 | MongoDB06
IP | 10.10.1.5 | 10.10.1.6 | 10.10.1.7 | 10.10.1.8 | 10.10.1.9 | 10.10.1.10
Shard replica set (28001), data | SH1RS | SH1RS | SH1RS | SH2RS | SH2RS | SH2RS

Host | MongoDB07 | MongoDB08 | MongoDB09 | MongoDB10 | MongoDB11 | MongoDB12
IP | 10.10.1.11 | 10.10.1.12 | 10.10.1.13 | 10.10.1.14 | 10.10.1.15 | 10.10.1.16
Shard replica set (28001), data | SH3RS | SH3RS | SH3RS | SH4RS | SH4RS | SH4RS
Config replica set (28002) | CFGRS | CFGRS | CFGRS | | |
Router (28003) | | | | Y | Y | Y
cd /mongodb/conf/28001
openssl rand -base64 102 > .keyFile
chmod 400 .keyFile
-- copy the keyfile to the same directory on every other node

vi mongo_sh1rs_28001.conf    -- the config file name differs for each shard
port = 28001
dbpath = /mongodb/data/28001
logpath = /mongodb/log/28001
pidfilepath = /mongodb/data/28001/28001.pid
logappend = true
fork = true
oplogSize = 204800
replSet = SH1RS    -- SH2RS/SH3RS/SH4RS on the other shards
#keyFile = /mongodb/conf/28001/.keyFile

-- start mongod
numactl --interleave=all mongod -f /mongodb/conf/28001/mongo_sh1rs_28001.conf

-- configure the replica set (run once in the mongo shell, e.g. on MongoDB01)
repConfig = {
    _id:'SH1RS',
    members:[
        {_id:0,host:'MongoDB01:28001'},
        {_id:1,host:'MongoDB02:28001'},
        {_id:2,host:'MongoDB03:28001'}]
}
rs.initiate(repConfig)
rs.status()
SH1RS:SECONDARY> rs.status()
{
    "set" : "SH1RS",
    "date" : ISODate("2019-03-14T14:12:17.210Z"),
    "myState" : 2,
    "term" : NumberLong(1),
    "syncingTo" : "MongoDB01:28001",
    "heartbeatIntervalMillis" : NumberLong(2000),
    "members" : [
        {
            "_id" : 0,
            "name" : "MongoDB01:28001",
            "health" : 1,
            "state" : 1,
            "stateStr" : "PRIMARY",
            "uptime" : 655,
            "optime" : { "ts" : Timestamp(1552572310, 2), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "lastHeartbeat" : ISODate("2019-03-14T14:12:15.436Z"),
            "lastHeartbeatRecv" : ISODate("2019-03-14T14:12:16.769Z"),
            "pingMs" : NumberLong(0),
            "electionTime" : Timestamp(1552572066, 1),
            "electionDate" : ISODate("2019-03-14T14:01:06Z"),
            "configVersion" : 1
        },
        {
            "_id" : 1,
            "name" : "MongoDB02:28001",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 655,
            "optime" : { "ts" : Timestamp(1552572310, 2), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "lastHeartbeat" : ISODate("2019-03-14T14:12:15.436Z"),
            "lastHeartbeatRecv" : ISODate("2019-03-14T14:12:15.436Z"),
            "pingMs" : NumberLong(0),
            "syncingTo" : "MongoDB01:28001",
            "configVersion" : 1
        },
        {
            "_id" : 2,
            "name" : "MongoDB03:28001",
            "health" : 1,
            "state" : 2,
            "stateStr" : "SECONDARY",
            "uptime" : 1002,
            "optime" : { "ts" : Timestamp(1552572310, 2), "t" : NumberLong(1) },
            "optimeDate" : ISODate("2019-03-14T14:05:10Z"),
            "syncingTo" : "MongoDB01:28001",
            "configVersion" : 1,
            "self" : true
        }
    ],
    "ok" : 1
}
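After rs.initiate() it is worth confirming on each shard that exactly one member is PRIMARY and the rest are SECONDARY, without reading the whole rs.status() document. A minimal mongo shell sketch of that check, assuming the shell is connected to any member of the set:

// print each member's name, health flag and replication state
var s = rs.status();
s.members.forEach(function (m) {
    print(m.name + "  health=" + m.health + "  state=" + m.stateStr);
});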
-- Test replication (insert on the primary):
use test
db.testc.insert({id:1,test:"from Mongodb01,the primary"})
db.testc.find()

-- Connect to a secondary:
use test
rs.slaveOk()
db.testc.find()
SH1RS:SECONDARY> db.testc.find()
{ "_id" : ObjectId("5c8a5f960304e7b4a4ac66a4"), "id" : 1, "test" : "from Mongodb01,the primary" }
----------------------------------------------------------------------------------------------------
--Deploy the config servers as a three-member replica set
mkdir /mongodb/data/28002
mkdir /mongodb/conf/28002
mkdir /mongodb/log/28002
cd /mongodb/conf/28002
-- copy the keyfile over from MongoDB01; it stays in the 28001 directory on every node

vi mongo_cfgrs_28002.conf
port = 28002
dbpath = /mongodb/data/28002
logpath = /mongodb/log/28002
pidfilepath = /mongodb/data/28002/28002.pid
logappend = true
fork = true
oplogSize = 204800
replSet = CFGRS
#keyFile = /mongodb/conf/28001/.keyFile

-- start mongod; note that a config server must be started with the --configsvr parameter
numactl --interleave=all mongod --configsvr -f /mongodb/conf/28002/mongo_cfgrs_28002.conf

-- configure the replica set (run in the mongo shell on one config server, e.g. MongoDB07)
repConfig = {
    _id:'CFGRS',
    configsvr: true,
    members:[
        {_id:0,host:'MongoDB07:28002'},
        {_id:1,host:'MongoDB08:28002'},
        {_id:2,host:'MongoDB09:28002'}]
}
rs.initiate(repConfig)
rs.status()
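Once CFGRS is initiated, a quick way to confirm that the set really came up as a config server replica set is to inspect its configuration. A minimal sketch, run in the mongo shell on any config server member (for example MongoDB07:28002):

// the config server set should report configsvr: true and list all three members
var cfg = rs.conf();
print("set: " + cfg._id + "  configsvr: " + cfg.configsvr);
cfg.members.forEach(function (m) { print("  member " + m._id + ": " + m.host); });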
-- Config and Start mongos instance
mkdir /mongodb/conf/28003
mkdir /mongodb/log/28003
cd /mongodb/conf/28003
-- copy the keyfile over from the other nodes

vi mongos_28003.conf
port = 28003
logpath = /mongodb/log/28003
pidfilepath = /mongodb/log/28003/28003.pid
logappend = true
fork = true
configdb = CFGRS/MongoDB07:28002,MongoDB08:28002,MongoDB09:28002
#keyFile = /mongodb/conf/28001/.keyFile

-- start mongos
numactl --interleave=all mongos -f /mongodb/conf/28003/mongos_28003.conf

-- Add Replica Sets as Shards: connect to a mongos
mongo MongoDB10:28003/admin
sh.addShard( "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" );
sh.addShard( "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" );
sh.addShard( "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" );
sh.addShard( "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" );
sh.status()
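With all four sh.addShard() calls done, the registered shards can also be listed with the listShards command, a compact alternative to sh.status(). A minimal sketch, run through any mongos:

// each entry should map a shard _id (SH1RS..SH4RS) to its replica-set host string
var res = db.getSiblingDB("admin").runCommand({ listShards: 1 });
res.shards.forEach(function (s) { print(s._id + " -> " + s.host); });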
--Enable Keyfile Access Control
Add the following line to every member's configuration file (all mongod and mongos instances):
keyFile = /mongodb/conf/28001/.keyFile

Create the shard-local user administrator (optional)
Run on the primary of each of the four shards:
admin = db.getSiblingDB("admin")
admin.createUser(
  {
    user: "mongoadmin",
    pwd: "Super**********",
    roles: [ { role: "root", db: "admin" } ]
  }
)
-------------------------
SH1RS:PRIMARY> admin = db.getSiblingDB("admin")
admin
SH1RS:PRIMARY> admin.createUser(
... {
...     user: "mongoadmin",
...     pwd: "Super**********",
...     roles: [ { role: "root", db: "admin" } ]
... }
... )
Successfully added user: {
    "user" : "mongoadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}
mongo -u "mongoadmin" -p "Super**********" --authenticationDatabase "admin" --port 28001
-----------------------------------------
Create the user administrator
admin = db.getSiblingDB("admin")
admin.createUser(
  {
    user: "shadmin",
    pwd: "shSuper**********",
    roles: [ { role: "root", db: "admin" } ]
  }
)
-----------------------
mongos> admin = db.getSiblingDB("admin")
admin
mongos> admin.createUser(
... {
...     user: "shadmin",
...     pwd: "shSuper**********",
...     roles: [ { role: "root", db: "admin" } ]
... }
... )
Successfully added user: {
    "user" : "shadmin",
    "roles" : [
        {
            "role" : "root",
            "db" : "admin"
        }
    ]
}
Note: once these users exist and keyfile access control is enabled, the passwordless local login no longer works; all later connections must authenticate with a username and password.
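From this point on, ordinary applications should get their own least-privilege accounts rather than using shadmin. A hedged sketch of creating such a user through mongos; the database name "appdb", the user "appuser" and its password are illustrative and not part of the original setup:

var admin = db.getSiblingDB("admin");
admin.auth("shadmin", "shSuper**********");
// a per-application user that can only read and write its own database
db.getSiblingDB("appdb").createUser({
    user: "appuser",
    pwd: "AppPassword****",
    roles: [ { role: "readWrite", db: "appdb" } ]
});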
-------------------------------
Check the sharding status:
"shadmin" -p "shSuper**********" --authenticationDatabase "admin" --port 28003 mongodb@MongoDB10 28003 $ mongo -u
2
MongoDB shell version3.2.22
3
connecting to 127.0.0.1 28003/test
4
mongos>
5
mongos>
6
mongos> sh.status()sh.status()
7
--- Sharding Status ---
8
sharding version
9
"_id" 1
10
"minCompatibleVersion" 5
11
"currentVersion" 6
12
"clusterId" ObjectId("5c939dcc871df831ca6178dc")
13
14
shards
15
{ "_id" "SH1RS", "host" "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001"
16
{ "_id" "SH2RS", "host" "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001"
17
{ "_id" "SH3RS", "host" "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001"
18
{ "_id" "SH4RS", "host" "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001"
19
active mongoses
20
"3.2.22" 1
21
balancer
22
Currently enabledyes
23
Currently runningno
24
Failed balancer rounds in last 5 attempts 5
25
Last reported error could not find host matching read preference mode"primary" for set SH1RS
26
Time of Reported error Sat Mar 23 2019 14 48 58 GMT+0800 (CST)
27
Migration Results for the last 24 hours
28
No recent migrations
29
databases
30
{ "_id" "test", "primary" "SH1RS", "partitioned" false
-------- Shard functionality test:
Insert data before the collection is sharded:
use test
var bulk = db.test_collection.initializeUnorderedBulkOp();
people = ["Marc","Bill","George","Eliot","Matt","Trey","Tracy","Greg","Steve","Kristina","Katie","Jeff"];
for(var i=0; i<1000000; i++){
    user_id = i;
    name = people[Math.floor(Math.random()*people.length)];
    number = Math.floor(Math.random()*10001);
    bulk.insert( { "user_id":user_id, "name":name, "number":number } );
}
bulk.execute();
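A quick sanity check that the bulk load completed; a minimal sketch (the expected count assumes only this load and the earlier small test insert have touched the database):

db.test_collection.count()    // expected: 1000000 for this load
db.test_collection.findOne()  // spot-check one generated document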
The data currently lives on a single shard:
mongos> db.stats()
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "db" : "test", "collections" : 2, "objects" : 1520654, "avgObjSize" : 70.83500454409747,
            "dataSize" : 107715533, "storageSize" : 39489536, "numExtents" : 0,
            "indexes" : 3, "indexSize" : 30613504, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000005") }
        },
        "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" : {
            "db" : "test", "collections" : 0, "objects" : 0, "avgObjSize" : 0,
            "dataSize" : 0, "storageSize" : 0, "numExtents" : 0,
            "indexes" : 0, "indexSize" : 0, "fileSize" : 0, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000004") }
        },
        "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" : {
            "db" : "test", "collections" : 0, "objects" : 0, "avgObjSize" : 0,
            "dataSize" : 0, "storageSize" : 0, "numExtents" : 0,
            "indexes" : 0, "indexSize" : 0, "fileSize" : 0, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000004") }
        },
        "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" : {
            "db" : "test", "collections" : 0, "objects" : 0, "avgObjSize" : 0,
            "dataSize" : 0, "storageSize" : 0, "numExtents" : 0,
            "indexes" : 0, "indexSize" : 0, "fileSize" : 0, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000003") }
        }
    },
    "objects" : 1520654,
    "avgObjSize" : 70,
    "dataSize" : 107715533,
    "storageSize" : 39489536,
    "numExtents" : 0,
    "indexes" : 3,
    "indexSize" : 30613504,
    "fileSize" : 0,
    "extentFreeList" : { "num" : 0, "totalSize" : 0 },
    "ok" : 1
}
Enable sharding:
mongos> sh.enableSharding( "test" )
{ "ok" : 1 }
mongos> db.test_collection.createIndex( { number : 1 } )
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "createdCollectionAutomatically" : false,
            "numIndexesBefore" : 2,
            "numIndexesAfter" : 2,
            "note" : "all indexes already exist",
            "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(1553326440, 4000), "electionId" : ObjectId("7fffffff0000000000000005") }
        }
    },
    "ok" : 1
}
mongos> sh.shardCollection( "test.test_collection", { "number" : 1 } )
{ "collectionsharded" : "test.test_collection", "ok" : 1 }
mongos>
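Besides sh.status(), the shell helper getShardDistribution() gives a per-shard breakdown of documents and chunks for the collection that was just sharded. A minimal sketch, run through mongos:

// shows data size, document count and chunk count on each shard for this collection
db.test_collection.getShardDistribution()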
Watch the chunks being migrated automatically:
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
    "_id" : 1,
    "minCompatibleVersion" : 5,
    "currentVersion" : 6,
    "clusterId" : ObjectId("5c939dcc871df831ca6178dc")
  }
  shards:
    {  "_id" : "SH1RS",  "host" : "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" }
    {  "_id" : "SH2RS",  "host" : "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" }
    {  "_id" : "SH3RS",  "host" : "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" }
    {  "_id" : "SH4RS",  "host" : "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" }
  active mongoses:
    "3.2.22" : 3
  balancer:
    Currently enabled:  yes
    Currently running:  yes
    Collections with active migrations:
      test.test_collection started at Sat Mar 23 2019 15:39:27 GMT+0800 (CST)
    Failed balancer rounds in last 5 attempts:  4
    Last reported error:  could not find host matching read preference { mode: "primary" } for set SH1RS
    Time of Reported error:  Sat Mar 23 2019 14:49:13 GMT+0800 (CST)
    Migration Results for the last 24 hours:
      1 : Failed with error 'chunk too big to move', from SH1RS to SH2RS
  databases:
    {  "_id" : "test",  "primary" : "SH1RS",  "partitioned" : true }
      test.test_collection
        shard key: { "number" : 1 }
        unique: false
        balancing: true
        chunks:
          SH1RS  7
        { "number" : { "$minKey" : 1 } } -->> { "number" : 1198 } on : SH1RS Timestamp(1, 4)
        { "number" : 1198 } -->> { "number" : 2396 } on : SH1RS Timestamp(1, 5)
        { "number" : 2396 } -->> { "number" : 3591 } on : SH1RS Timestamp(1, 6)
        { "number" : 3591 } -->> { "number" : 4789 } on : SH1RS Timestamp(1, 7)
        { "number" : 4789 } -->> { "number" : 7188 } on : SH1RS Timestamp(1, 1)
        { "number" : 7188 } -->> { "number" : 9585 } on : SH1RS Timestamp(1, 2)
        { "number" : 9585 } -->> { "number" : { "$maxKey" : 1 } } on : SH1RS Timestamp(1, 3)

mongos> db.stats()
{
    "raw" : {
        "SH1RS/MongoDB01:28001,MongoDB02:28001,MongoDB03:28001" : {
            "db" : "test", "collections" : 2, "objects" : 1520654, "avgObjSize" : 70.83500454409747,
            "dataSize" : 107715533, "storageSize" : 39489536, "numExtents" : 0,
            "indexes" : 3, "indexSize" : 30613504, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000005") }
        },
        "SH2RS/MongoDB04:28001,MongoDB05:28001,MongoDB06:28001" : {
            "db" : "test", "collections" : 1, "objects" : 54412, "avgObjSize" : 70.83121370285966,
            "dataSize" : 3854068, "storageSize" : 1032192, "numExtents" : 0,
            "indexes" : 2, "indexSize" : 815104, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000004") }
        },
        "SH3RS/MongoDB07:28001,MongoDB08:28001,MongoDB09:28001" : {
            "db" : "test", "collections" : 0, "objects" : 0, "avgObjSize" : 0,
            "dataSize" : 0, "storageSize" : 0, "numExtents" : 0,
            "indexes" : 0, "indexSize" : 0, "fileSize" : 0, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000004") }
        },
        "SH4RS/MongoDB10:28001,MongoDB11:28001,MongoDB12:28001" : {
            "db" : "test", "collections" : 0, "objects" : 0, "avgObjSize" : 0,
            "dataSize" : 0, "storageSize" : 0, "numExtents" : 0,
            "indexes" : 0, "indexSize" : 0, "fileSize" : 0, "ok" : 1,
            "$gleStats" : { "lastOpTime" : Timestamp(0, 0), "electionId" : ObjectId("7fffffff0000000000000003") }
        }
    },
    "objects" : 1575066,
    "avgObjSize" : 70,
    "dataSize" : 111569601,
    "storageSize" : 40521728,
    "numExtents" : 0,
    "indexes" : 5,
    "indexSize" : 31428608,
    "fileSize" : 0,
    "extentFreeList" : { "num" : 0, "totalSize" : 0 },
    "ok" : 1
}
Data keeps migrating gradually.....
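To follow the migration without rereading the whole sh.status() output, the chunk counts per shard can be read straight from the config database. A minimal sketch, run through mongos, that groups config.chunks by shard for this collection:

db.getSiblingDB("config").chunks.aggregate([
    { $match: { ns: "test.test_collection" } },      // only the collection being balanced
    { $group: { _id: "$shard", chunks: { $sum: 1 } } }
])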
The three mongos routers listen on port 28003 on 10.10.1.14, 10.10.1.15 and 10.10.1.16; test a connection to each of them:
mongodb@MongoDB03 ~ $ mongo 10.10.1.15:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.15:28003/test
mongos> exit
bye
mongodb@MongoDB03 ~ $ mongo 10.10.1.16:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.16:28003/test
mongodb@MongoDB03 ~ $ mongo 10.10.1.14:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.14:28003/test
mongos>
mongos>
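Since any of the three routers can serve the cluster, applications typically list all of them so that one mongos can fail without breaking connectivity. A hedged sketch of that idea; the connection string and the new Mongo() call below are illustrative, and a real client would still authenticate with its own account:

// connection-string form (driver side), listing all three routers:
//   mongodb://appuser:AppPassword****@10.10.1.14:28003,10.10.1.15:28003,10.10.1.16:28003/appdb?authSource=appdb
// from the mongo shell, connecting to one router explicitly:
var conn = new Mongo("10.10.1.14:28003");
var testdb = conn.getDB("test");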
------------------- NIC bonding:
cd /etc/sysconfig/network-scripts/
vi ifcfg-bond0
DEVICE=bond0
BOOTPROTO=static
IPADDR=10.10.1.6
NETMASK=255.255.0.0
ONBOOT=yes
USECTL=no
TYPE=Ethernet

cp ifcfg-eth1 ifcfg-eth1.bak
vi ifcfg-eth1
-- comment out the IPADDR, NETMASK and GATEWAY lines, then add:
MASTER=bond0
SLAVE=yes

-- the same setup done with echo (example for the 10.10.1.7 host):
cd /etc/sysconfig/network-scripts/
echo "DEVICE=bond0">>ifcfg-bond0
echo "BOOTPROTO=static">>ifcfg-bond0
echo "IPADDR=10.10.1.7">>ifcfg-bond0
echo "NETMASK=255.255.0.0">>ifcfg-bond0
echo "ONBOOT=yes">>ifcfg-bond0
echo "USECTL=no">>ifcfg-bond0
echo "TYPE=Ethernet">>ifcfg-bond0
cp ifcfg-eth1 ifcfg-eth1.bak
cp ifcfg-eth2 ifcfg-eth2.bak
echo "MASTER=bond0">>ifcfg-eth1
echo "SLAVE=yes">>ifcfg-eth1
echo "MASTER=bond0">>ifcfg-eth2
echo "SLAVE=yes">>ifcfg-eth2
echo "alias bond0 bonding">>/etc/modprobe.d/dist.conf
echo "options bond0 miimon=100 mode=1">>/etc/modprobe.d/dist.conf
-- edit ifcfg-eth1 and ifcfg-eth2 to comment out the IPADDR, NETMASK and GATEWAY lines
service network restart
Information handed over to the maintenance staff:
Shard-local administrator (only for special local maintenance; this account cannot log in from outside through mongos):

SH1RS:PRIMARY> admin = db.getSiblingDB("admin")
admin
SH1RS:PRIMARY> admin.createUser(
... {
...     user: "mongoadmin",
...     pwd: "Super**********",
...     roles: [ { role: "root", db: "admin" } ]
... }
... )
Successfully added user: {
    "user" : "mongoadmin",
    "roles" : [ { "role" : "root", "db" : "admin" } ]
}

Cluster-level administrator (the day-to-day superuser: used to create other users and to manage the cluster and its databases; for administrators only, never for application connections):

mongos> admin = db.getSiblingDB("admin")
admin
mongos> admin.createUser(
... {
...     user: "shadmin",
...     pwd: "shSuper**********",
...     roles: [ { role: "root", db: "admin" } ]
... }
... )
Successfully added user: {
    "user" : "shadmin",
    "roles" : [ { "role" : "root", "db" : "admin" } ]
}

mongodb@MongoDB03 ~ $ mongo 10.10.1.14:28003 -u "shadmin" -p "shSuper**********" --authenticationDatabase "admin"
MongoDB shell version: 3.2.22
connecting to: 10.10.1.14:28003/test
mongos>

Cluster admin account: shadmin   Password: shSuper**********
mongos: three routers are configured, on 10.10.1.14/15/16, port 28003; connecting to any one of them reaches the whole cluster.