案例:mongodb3.6集群搭建:分片+副本集
环境准备:
操作系统:Centos7.3
节点: 10.2.13.187(node1)、10.2.13.186(node2)、10.2.13.185(node3)
角色规划:
服务器node1:
mongos
config server
shard server1 主节点
shard server2 仲裁节点
shard server3 副本节点
服务器node2:
mongos
config server
shard server1 副本节点
shard server2 主节点
shard server3 仲裁节点
服务器node3:
mongos
config server
shard server1 仲裁节点
shard server2 副本节点
shard server3 主节点
端口分配:
mongos:21000
config:22000
shard1: 27001
shard2: 27002
shard3: 27003
配置前:
* 关闭firewalld防火墙 systemctl stop firewalld或者配置如下开放端口
firewall-cmd --add-port=21000/tcp --permanent
firewall-cmd --add-port=21000/tcp
firewall-cmd --add-port=27001/tcp --permanent
firewall-cmd --add-port=27001/tcp
firewall-cmd --add-port=27002/tcp --permanent
firewall-cmd --add-port=27002/tcp
firewall-cmd --add-port=27003/tcp --permanent
firewall-cmd --add-port=27003/tcp
firewall-cmd --add-port=22000/tcp --permanent
firewall-cmd --add-port=22000/tcp
* 关闭selinux
vi /etc/selinux/config
SELINUX=disabled
临时生效
setenforce 0
* 配置时间同步
1)安装mongodb(以node1为例,三个节点都要配置)
[root@node1 ~]# cd /etc/yum.repos.d/
[root@node1 yum.repos.d]# vim mongodb-org-3.6.repo
[mongodb-org-3.6]
name = MongoDB Repository
baseurl = https://repo.mongodb.org/yum/redhat/7Server/mongodb-org/3.6/x86_64/
gpgcheck = 0
enabled = 1
[root@node1 ~]# yum install -y mongodb-org-3.6.3 mongodb-org-server-3.6.3 mongodb-org-shell-3.6.3 mongodb-org-mongos-3.6.3 mongodb-org-tools-3.6.3
2) 路径规划并创建
分别在每台机器上建立conf、mongos、config、shard1、shard2、shard3
[root@node1 ~]# mkdir -p /etc/mongod/conf.d
config server数据存储路径
[root@node1 ~]# mkdir -p /var/lib/mongo/config/data
shard server数据存储路径
[root@node1 ~]# mkdir -p /var/lib/mongo/shard1/data
[root@node1 ~]# mkdir -p /var/lib/mongo/shard2/data
[root@node1 ~]# mkdir -p /var/lib/mongo/shard3/data
[root@node1 ~]# chown -R mongod.mongod /var/lib/mongo
日志文件路径
/var/log/mongodb
(注意:以上路径需要在三个节点上都要创建)
3) 配置config server服务器(在三个节点上都要配置,由于mongodb3.4版本以后,所有配置服务器也要创建副本集)
~~~
[root@node1 ~]# cd /etc/mongod/conf.d/
[root@node1 conf.d]# ll
total 0
[root@node1 conf.d]# vim config.conf
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/configsrv.log
storage:
dbPath: /var/lib/mongo/config/data
journal:
enabled: true
processManagement:
fork: true
pidFilePath: /var/run/mongodb/configsvr.pid
timeZoneInfo: /usr/share/zoneinfo
net:
port: 22000
bindIp: 0.0.0.0
maxIncomingConnections: 30000
replication:
replSetName: csReplSet
sharding:
clusterRole: configsvr
然后把配置文件发送到node2和node3上
[root@node1 conf.d]# scp config.conf root@10.2.13.185:/etc/mongod/conf.d/
root@10.2.13.185's password:
config.conf 100% 426 308.1KB/s 00:00
[root@node1 conf.d]# scp config.conf root@10.2.13.186:/etc/mongod/conf.d/
root@10.2.13.186's password:
config.conf 100% 426 308.1KB/s 00:00
~~~
配置mongod-configsvr启动脚本
~~~
[root@node1 conf.d]# cat /usr/lib/systemd/system/mongod-configsvr.service
[Unit]
Description=Mongodb Config Server
After=network.target
Documentation=https://docs.mongodb.org/manual
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod/conf.d/config.conf"
ExecStart=/usr/bin/mongod $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/configsvr.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
[Install]
WantedBy=multi-user.target
~~~
然后通过scp把脚本拷贝到node2和node3上
~~~
[root@node1 system]# scp mongod-configsvr.service root@10.2.13.186:/usr/lib/systemd/system
The authenticity of host '10.2.13.186 (10.2.13.186)' can't be established.
ECDSA key fingerprint is SHA256:M6+YJtAkiei0mHVZPMJOq4Wb2JfoDaNdhZce8qESWjw.
ECDSA key fingerprint is MD5:09:e2:00:bb:f4:66:77:4f:37:6d:a6:27:96:e9:d8:54.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.2.13.186' (ECDSA) to the list of known hosts.
root@10.2.13.186's password:
mongod-configsvr.service 100% 910 693.4KB/s 00:00
[root@node1 system]# scp mongod-configsvr.service root@10.2.13.185:/usr/lib/systemd/system
The authenticity of host '10.2.13.185 (10.2.13.185)' can't be established.
ECDSA key fingerprint is SHA256:BU7bQKNkN5T4hhkpVmCmEy8hRXZeAJM0AfSONQP48KE.
ECDSA key fingerprint is MD5:f2:b5:a8:40:ee:07:3a:99:ba:c1:37:cc:59:79:f3:82.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added '10.2.13.185' (ECDSA) to the list of known hosts.
root@10.2.13.185's password:
mongod-configsvr.service 100% 910 557.6KB/s 00:00
~~~
启动mongod-configsvr服务(三个节点都启动)
[root@node1 ~]# systemctl enable mongod-configsvr
[root@node1 ~]# systemctl start mongod-configsvr
[root@node1 ~]# ss -tunlp|grep 22000
tcp LISTEN 0 128 *:22000 *:* users:(("mongod",pid=18953,fd=11))
[root@node1 ~]#
登录一台节点,初始化配置副本集
~~~
> config = { _id:"csReplSet",members:[ {_id:1,host:"10.2.13.187:22000"},{_id:2,host:"10.2.13.186:22000"},{_id:3,host:"10.2.13.185:22000"}] }
{
"_id" : "csReplSet",
"members" : [
{
"_id" : 1,
"host" : "10.2.13.187:22000"
},
{
"_id" : 2,
"host" : "10.2.13.186:22000"
},
{
"_id" : 3,
"host" : "10.2.13.185:22000"
}
]
}
> rs.initiate(config)
{
"ok" : 1,
"operationTime" : Timestamp(1520925604, 1),
"$gleStats" : {
"lastOpTime" : Timestamp(1520925604, 1),
"electionId" : ObjectId("000000000000000000000000")
},
"$clusterTime" : {
"clusterTime" : Timestamp(1520925604, 1),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
}
}
~~~
二)配置分片副本集
1)设置第一个分片副本集
添加配置文件
~~~
[root@node1 conf.d]# cat shard1.conf
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/shard1.log
storage:
dbPath: /var/lib/mongo/shard1/data
journal:
enabled: true
processManagement:
fork: true
pidFilePath: /var/run/mongodb/shard1.pid
timeZoneInfo: /usr/share/zoneinfo
net:
port: 27001
bindIp: 10.2.13.187
replication:
replSetName: shard1
sharding:
clusterRole: shardsvr
~~~
添加启动脚本
~~~
[root@node1 conf.d]# cat /usr/lib/systemd/system/mongod-shard1.service
[Unit]
Description=Mongodb Shard1 Server
After=network.target
Documentation=https://docs.mongodb.org/manual
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod/conf.d/shard1.conf"
ExecStart=/usr/bin/mongod $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/shard1.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
[Install]
WantedBy=multi-user.target
~~~
启动服务
[root@node1 ~]# systemctl enable mongod-shard1
[root@node1 ~]# systemctl start mongod-shard1
然后把配置文件和启动脚本拷贝到node2和node3上
登陆任意一台非仲裁节点服务器,初始化副本集,shard1节点node3为仲裁节点
~~~
> config={ _id:"shard1",members:[{_id:1,host:"10.2.13.187:27001",priority:2},{_id:2,host:"10.2.13.186:27001",priority:1},{_id:3,host:"10.2.13.185:27001",arbiterOnly:true}]
... }
{
"_id" : "shard1",
"members" : [
{
"_id" : 1,
"host" : "10.2.13.187:27001",
"priority" : 2
},
{
"_id" : 2,
"host" : "10.2.13.186:27001",
"priority" : 1
},
{
"_id" : 3,
"host" : "10.2.13.185:27001",
"arbiterOnly" : true
}
]
}
> rs.initiate(config)
{ "ok" : 1 }
shard1:SECONDARY>
shard1:PRIMARY>
[root@node3 tmp]# mongo 10.2.13.185:27001
MongoDB shell version v3.6.3
connecting to: mongodb://10.2.13.185:27001/test
MongoDB server version: 3.6.3
Server has startup warnings:
2018-03-13T16:26:25.158+0800 I CONTROL [initandlisten]
2018-03-13T16:26:25.159+0800 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2018-03-13T16:26:25.159+0800 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
2018-03-13T16:26:25.159+0800 I CONTROL [initandlisten]
shard1:ARBITER>
[root@node2 scripts]# mongo 10.2.13.186:27001
MongoDB shell version v3.6.3
connecting to: mongodb://10.2.13.186:27001/test
MongoDB server version: 3.6.3
Server has startup warnings:
2018-03-13T16:26:10.147+0800 I CONTROL [initandlisten]
2018-03-13T16:26:10.147+0800 I CONTROL [initandlisten] ** WARNING: Access control is not enabled for the database.
2018-03-13T16:26:10.147+0800 I CONTROL [initandlisten] ** Read and write access to data and configuration is unrestricted.
2018-03-13T16:26:10.147+0800 I CONTROL [initandlisten]
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten]
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten]
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten] ** We suggest setting it to 'never'
2018-03-13T16:26:10.148+0800 I CONTROL [initandlisten]
shard1:SECONDARY>
~~~
2)配置第二分片副本集
配置文件
~~~
[root@node1 conf.d]# vim shard2.conf
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/shard2.log
storage:
dbPath: /var/lib/mongo/shard2/data
journal:
enabled: true
processManagement:
fork: true
pidFilePath: /var/run/mongodb/shard2.pid
timeZoneInfo: /usr/share/zoneinfo
net:
port: 27002
bindIp: 10.2.13.187
replication:
replSetName: shard2
sharding:
clusterRole: shardsvr
~~~
启动脚本
~~~
[root@node1 system]# cat mongod-shard2.service
[Unit]
Description=Mongodb Shard2 Server
After=network.target
Documentation=https://docs.mongodb.org/manual
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod/conf.d/shard2.conf"
ExecStart=/usr/bin/mongod $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/shard2.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
[Install]
WantedBy=multi-user.target
~~~
启动服务器
[root@node1 ~]# systemctl enable mongod-shard2
Created symlink from /etc/systemd/system/multi-user.target.wants/mongod-shard2.service to /usr/lib/systemd/system/mongod-shard2.service.
[root@node1 ~]# systemctl start mongod-shard2
然后把文件拷贝到node2和node3
~~~
[root@node1 conf.d]# scp shard2.conf root@10.2.13.185:/etc/mongod/conf.d/
root@10.2.13.185's password:
shard2.conf 100% 381 224.6KB/s 00:00
[root@node1 conf.d]# scp shard2.conf root@10.2.13.186:/etc/mongod/conf.d/
root@10.2.13.186's password:
shard2.conf 100% 381 224.6KB/s 00:00
[root@node1 system]# scp mongod-shard2.service root@10.2.13.186:/usr/lib/systemd/system
root@10.2.13.186's password:
mongod-shard2.service 100% 905 503.5KB/s 00:00
[root@node1 system]# scp mongod-shard2.service root@10.2.13.185:/usr/lib/systemd/system
root@10.2.13.185's password:
mongod-shard2.service 100% 905 503.5KB/s 00:00
然后登录到node2和node3,调整配置文件中的bindIp为本机IP,如
[root@node3 tmp]# vim /etc/mongod/conf.d/shard2.conf
[root@node3 tmp]# systemctl start mongod-shard2
[root@node3 tmp]# systemctl enable mongod-shard2
Created symlink from /etc/systemd/system/multi-user.target.wants/mongod-shard2.service to /usr/lib/systemd/system/mongod-shard2.service.
~~~
登陆任意一台非仲裁节点服务器,初始化副本集
~~~
> config={_id:"shard2",members:[{_id:1,host:"10.2.13.187:27002",arbiterOnly:true},{_id:2,host:"10.2.13.186:27002",priority:2},{_id:3,host:"10.2.13.185:27002",priority:1}]}
{
"_id" : "shard2",
"members" : [
{
"_id" : 1,
"host" : "10.2.13.187:27002",
"arbiterOnly" : true
},
{
"_id" : 2,
"host" : "10.2.13.186:27002",
"priority" : 2
},
{
"_id" : 3,
"host" : "10.2.13.185:27002",
"priority" : 1
}
]
}
> rs.initiate(config)
{ "ok" : 1 }
shard2:OTHER>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:SECONDARY>
shard2:PRIMARY>
~~~
3)设置第三个分片副本集
配置文件
~~~
[root@node1 conf.d]# vim shard3.conf
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/shard3.log
storage:
dbPath: /var/lib/mongo/shard3/data
journal:
enabled: true
processManagement:
fork: true
pidFilePath: /var/run/mongodb/shard3.pid
timeZoneInfo: /usr/share/zoneinfo
net:
port: 27003
bindIp: 10.2.13.187
replication:
replSetName: shard3
sharding:
clusterRole: shardsvr
然后拷贝到node2和node3
[root@node1 conf.d]# scp shard3.conf root@10.2.13.185:/etc/mongod/conf.d/
[root@node1 conf.d]# scp shard3.conf root@10.2.13.186:/etc/mongod/conf.d/
~~~
启动文件
~~~
[root@node1 system]# cat mongod-shard3.service
[Unit]
Description=Mongodb Shard3 Server
After=network.target
Documentation=https://docs.mongodb.org/manual
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod/conf.d/shard3.conf"
ExecStart=/usr/bin/mongod $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/shard3.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
[Install]
WantedBy=multi-user.target
拷贝到node2和node3
[root@node1 system]# scp mongod-shard3.service root@10.2.13.185:/usr/lib/systemd/system
[root@node1 system]# scp mongod-shard3.service root@10.2.13.186:/usr/lib/systemd/system
~~~
启动服务
[root@node1 system]# systemctl start mongod-shard3.service
[root@node1 system]# systemctl enable mongod-shard3.service
Created symlink from /etc/systemd/system/multi-user.target.wants/mongod-shard3.service to /usr/lib/systemd/system/mongod-shard3.service.
然后修改node2和node3的配置文件,然后启动服务
配置副本集
~~~
> config={_id:"shard3",members:[{_id:1,host:"10.2.13.187:27003",priority:1},{_id:2,host:"10.2.13.186:27003",arbiterOnly:true},{_id:3,host:"10.2.13.185:27003",priority:2}]}
{
"_id" : "shard3",
"members" : [
{
"_id" : 1,
"host" : "10.2.13.187:27003",
"priority" : 1
},
{
"_id" : 2,
"host" : "10.2.13.186:27003",
"arbiterOnly" : true
},
{
"_id" : 3,
"host" : "10.2.13.185:27003",
"priority" : 2
}
]
}
> rs.initiate(config)
{ "ok" : 1 }
~~~
三)配置路由服务器 mongos
先启动配置服务器和分片服务器,后启动路由实例(三台机器)
配置文件
~~~
[root@node3 system]# vim /etc/mongod/conf.d/mongos.conf
systemLog:
destination: file
logAppend: true
path: /var/log/mongodb/mongos.log
processManagement:
fork: true
pidFilePath: /var/run/mongodb/mongos.pid
timeZoneInfo: /usr/share/zoneinfo
net:
port: 21000
bindIp: 10.2.13.185
sharding:
configDB: csReplSet/10.2.13.187:22000,10.2.13.186:22000,10.2.13.185:22000
~~~
启动脚本
~~~
[root@node3 system]# cat mongod-mongos.service
[Unit]
Description=Mongodb Mongos Server
After=network.target mongod-shard1.service mongod-shard2.service mongod-shard3.service
Documentation=https://docs.mongodb.org/manual
[Service]
User=mongod
Group=mongod
Environment="OPTIONS=-f /etc/mongod/conf.d/mongos.conf"
ExecStart=/usr/bin/mongos $OPTIONS
ExecStartPre=/usr/bin/mkdir -p /var/run/mongodb
ExecStartPre=/usr/bin/chown mongod:mongod /var/run/mongodb
ExecStartPre=/usr/bin/chmod 0755 /var/run/mongodb
PermissionsStartOnly=true
PIDFile=/var/run/mongodb/mongos.pid
Type=forking
# file size
LimitFSIZE=infinity
# cpu time
LimitCPU=infinity
# virtual memory size
LimitAS=infinity
# open files
LimitNOFILE=64000
# processes/threads
LimitNPROC=64000
# locked memory
LimitMEMLOCK=infinity
# total threads (user+kernel)
TasksMax=infinity
TasksAccounting=false
# Recommended limits for mongod as specified in
# http://docs.mongodb.org/manual/reference/ulimit/#recommended-settings
[Install]
WantedBy=multi-user.target
~~~
启动服务
~~~
[root@node3 system]# systemctl start mongod-mongos.service
[root@node3 system]#
[root@node3 system]#
[root@node3 system]# systemctl enable mongod-mongos.service
Created symlink from /etc/systemd/system/multi-user.target.wants/mongod-mongos.service to /usr/lib/systemd/system/mongod-mongos.service.
~~~
然后把配置文件和启动脚本拷贝到node1和node2
四)启动分片
mongo 10.2.13.187:21000
use admin
mongos> sh.addShard("shard1/10.2.13.187:27001,10.2.13.186:27001,10.2.13.185:27001")
mongos> sh.addShard("shard2/10.2.13.187:27002,10.2.13.186:27002,10.2.13.185:27002")
mongos> sh.addShard("shard3/10.2.13.187:27003,10.2.13.186:27003,10.2.13.185:27003")
如下
mongos> sh.addShard("shard2/10.2.13.187:27002,10.2.13.186:27002,10.2.13.185:27002")
{
"shardAdded" : "shard2",
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1520933270, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1520933270, 3)
}
~~~
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5aa77bb0cb090fc235bf0ba4")
}
shards:
{ "_id" : "shard1", "host" : "shard1/10.2.13.186:27001,10.2.13.187:27001", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/10.2.13.185:27002,10.2.13.186:27002", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/10.2.13.185:27003,10.2.13.187:27003", "state" : 1 }
active mongoses:
"3.6.3" : 3
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
~~~
五、测试
1)sh.enableSharding(dbname)
通过以上命令指定在哪个数据库上启用分片
2)sh.shardCollection(fullName,key,unique,options) shards the collection
通过以上命令在哪个集合上启用分片
~~~
mongos> sh.enableSharding("testdb")
{
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1520933845, 3),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1520933845, 3)
}
mongos> sh.status()
--- Sharding Status ---
sharding version: {
"_id" : 1,
"minCompatibleVersion" : 5,
"currentVersion" : 6,
"clusterId" : ObjectId("5aa77bb0cb090fc235bf0ba4")
}
shards:
{ "_id" : "shard1", "host" : "shard1/10.2.13.186:27001,10.2.13.187:27001", "state" : 1 }
{ "_id" : "shard2", "host" : "shard2/10.2.13.185:27002,10.2.13.186:27002", "state" : 1 }
{ "_id" : "shard3", "host" : "shard3/10.2.13.185:27003,10.2.13.187:27003", "state" : 1 }
active mongoses:
"3.6.3" : 3
autosplit:
Currently enabled: yes
balancer:
Currently enabled: yes
Currently running: no
Failed balancer rounds in last 5 attempts: 0
Migration Results for the last 24 hours:
No recent migrations
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
不做分片的,都放到shard2上
mongos> sh.shardCollection("testdb.student",{"age":1})
{
"collectionsharded" : "testdb.student",
"collectionUUID" : UUID("24a2b55c-429d-48ad-85f8-1369b4a709d1"),
"ok" : 1,
"$clusterTime" : {
"clusterTime" : Timestamp(1520934029, 15),
"signature" : {
"hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="),
"keyId" : NumberLong(0)
}
},
"operationTime" : Timestamp(1520934029, 15)
}
~~~
mongos> use testdb
mongos> for (var i=1;i<=1000000;i++){
... db.student.insert({"name":"stu"+i,"age":(i%120),"classes":"class"+i,"address":"jinlindonglu"})
... }
查看分片情况
~~~
databases:
{ "_id" : "config", "primary" : "config", "partitioned" : true }
config.system.sessions
shard key: { "_id" : 1 }
unique: false
balancing: true
chunks:
shard1 1
{ "_id" : { "$minKey" : 1 } } -->> { "_id" : { "$maxKey" : 1 } } on : shard1 Timestamp(1, 0)
{ "_id" : "testdb", "primary" : "shard2", "partitioned" : true }
testdb.student
shard key: { "age" : 1 }
unique: false
balancing: true
chunks:
shard1 36
shard2 49
shard3 35
too many chunks to print, use verbose if you want to force print
~~~
启动
mongodb的启动顺序是:先启动配置服务器,再启动分片,最后启动mongos。
systemctl start mongod-configsvr
systemctl start mongod-shard1
systemctl start mongod-shard2
systemctl start mongod-shard3
systemctl start mongod-mongos
关闭:
systemctl stop mongod-mongos
systemctl stop mongod-shard3
systemctl stop mongod-shard2
systemctl stop mongod-shard1
systemctl stop mongod-configsvr