Reposted from: http://noodle.blog.51cto.com/2925423/1749028
[mongodb@node1 ~]$ cat /etc/issue
CentOS release 6.4 (Final)
Kernel \r on an \m

[mongodb@node1 ~]$ uname -r
2.6.32-358.el6.x86_64
[mongodb@node1 ~]$ uname -m
x86_64
[root@node1 ~]# groupadd mongodb
[root@node1 ~]# useradd -g mongodb mongodb
[root@node1 ~]# mkdir /data
[root@node1 ~]# chown mongodb.mongodb /data -R
[root@node1 ~]# su - mongodb
[mongodb@node1 ~]$ mkdir /data/{config,shard1,shard2,shard3,mongos,logs,configsvr,keyfile} -pv
[mongodb@node1 ~]$ touch /data/keyfile/zxl
[mongodb@node1 ~]$ touch /data/logs/shard{1..3}.log
[mongodb@node1 ~]$ touch /data/logs/{configsvr,mongos}.log
[mongodb@node1 ~]$ touch /data/config/shard{1..3}.conf
[mongodb@node1 ~]$ touch /data/config/{configsvr,mongos}.conf
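Note that touch only creates an empty /data/keyfile/zxl. If you later uncomment the security/keyFile section in the config files below, the keyfile needs real key material and owner-only permissions. A minimal sketch (the 741-byte length is just a common choice):

openssl rand -base64 741 > /data/keyfile/zxl   # fill the keyfile with random base64 key material
chmod 600 /data/keyfile/zxl                    # mongod refuses keyfiles that are readable by others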
[mongodb@node1 ~]$ wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-rhel62-3.2.3.tgz
[mongodb@node3 ~]$ tar fxz mongodb-linux-x86_64-rhel62-3.2.3.tgz -C /data
[mongodb@node3 ~]$ ln -s /data/mongodb-linux-x86_64-rhel62-3.2.3 /data/mongodb
[mongodb@node1 ~]$ echo "export PATH=$PATH:/data/mongodb/bin" >> ~/.bash_profile
[mongodb@node1 data]$ source ~/.bash_profile
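A quick sanity check that the new PATH entry is picked up, before going any further:

which mongod        # should point at /data/mongodb/bin/mongod
mongod --version    # should report db version v3.2.3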
[mongodb@node1 ~]$ cat /data/config/shard1.conf
systemLog:
  destination: file
  path: /data/logs/shard1.log
  logAppend: true
processManagement:
  fork: true
  pidFilePath: "/data/shard1/shard1.pid"
net:
  port: 10001
storage:
  dbPath: "/data/shard1"
  engine: wiredTiger
  journal:
    enabled: true
  directoryPerDB: true
operationProfiling:
  slowOpThresholdMs: 10
  mode: "slowOp"
#security:
#  keyFile: "/data/keyfile/zxl"
#  clusterAuthMode: "keyFile"
replication:
  oplogSizeMB: 50
  replSetName: "shard1_zxl"
  secondaryIndexPrefetch: "all"
[mongodb@node1 ~]$ cat /data/config/shard2.conf
systemLog:
  destination: file
  path: /data/logs/shard2.log
  logAppend: true
processManagement:
  fork: true
  pidFilePath: "/data/shard2/shard2.pid"
net:
  port: 10002
storage:
  dbPath: "/data/shard2"
  engine: wiredTiger
  journal:
    enabled: true
  directoryPerDB: true
operationProfiling:
  slowOpThresholdMs: 10
  mode: "slowOp"
#security:
#  keyFile: "/data/keyfile/zxl"
#  clusterAuthMode: "keyFile"
replication:
  oplogSizeMB: 50
  replSetName: "shard2_zxl"
  secondaryIndexPrefetch: "all"
[mongodb@node1 ~]$ cat /data/config/shard3.conf
systemLog:
  destination: file
  path: /data/logs/shard3.log
  logAppend: true
processManagement:
  fork: true
  pidFilePath: "/data/shard3/shard3.pid"
net:
  port: 10003
storage:
  dbPath: "/data/shard3"
  engine: wiredTiger
  journal:
    enabled: true
  directoryPerDB: true
operationProfiling:
  slowOpThresholdMs: 10
  mode: "slowOp"
#security:
#  keyFile: "/data/keyfile/zxl"
#  clusterAuthMode: "keyFile"
replication:
  oplogSizeMB: 50
  replSetName: "shard3_zxl"
  secondaryIndexPrefetch: "all"
[mongodb@node1 ~]$ cat /data/config/configsvr.conf
systemLog:
  destination: file
  path: /data/logs/configsvr.log
  logAppend: true
processManagement:
  fork: true
  pidFilePath: "/data/configsvr/configsvr.pid"
net:
  port: 10004
storage:
  dbPath: "/data/configsvr"
  engine: wiredTiger
  journal:
    enabled: true
#security:
#  keyFile: "/data/keyfile/zxl"
#  clusterAuthMode: "keyFile"
sharding:
  clusterRole: configsvr
[mongodb@node3 ~]$ cat /data/config/mongos.conf
systemLog:
  destination: file
  path: /data/logs/mongos.log
  logAppend: true
processManagement:
  fork: true
  pidFilePath: /data/mongos/mongos.pid
net:
  port: 10005
sharding:
  configDB: 192.168.75.128:10004,192.168.75.129:10004,192.168.75.130:10004
#security:
#  keyFile: "/data/keyfile/zxl"
#  clusterAuthMode: "keyFile"
[mongodb@node1 ~]$ mongod -f /data/config/shard1.conf
mongod: /usr/lib64/libcrypto.so.10: no version information available (required by mongod)
mongod: /usr/lib64/libcrypto.so.10: no version information available (required by mongod)
mongod: /usr/lib64/libssl.so.10: no version information available (required by mongod)
mongod: relocation error: mongod: symbol TLSv1_1_client_method, version libssl.so.10 not defined in file libssl.so.10 with link time reference
[mongodb@node1 ~]$ su - root
Password:
[root@node1 ~]# yum install openssl-devel -y
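If you want to confirm which OpenSSL libraries the mongod binary actually resolves after the update, ldd shows the linked libssl/libcrypto (paths will vary by system):

ldd /data/mongodb/bin/mongod | grep -E 'libssl|libcrypto'   # list the SSL libraries mongod links against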
[mongodb@node1 ~]$ mongod -f /data/config/shard1.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1737
child process started successfully, parent exiting
[mongodb@node1 ~]$ mongod -f /data/config/shard2.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1760
child process started successfully, parent exiting
[mongodb@node1 ~]$ mongod -f /data/config/shard3.conf
about to fork child process, waiting until server is ready for connections.
forked process: 1783
child process started successfully, parent exiting
[mongodb@node1 ~]$ mongo --port 10001
MongoDB shell version: 3.2.3
connecting to: 127.0.0.1:10001/test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
        http://docs.mongodb.org/
Questions? Try the support group
        http://groups.google.com/group/mongodb-user
Server has startup warnings:
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten]
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/enabled is 'always'.
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten]
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten] ** WARNING: /sys/kernel/mm/transparent_hugepage/defrag is 'always'.
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten] **        We suggest setting it to 'never'
2016-03-08T13:28:18.508+0800 I CONTROL  [initandlisten]
[mongodb@node2 config]$ su - root
Password:
[root@node1 ~]# echo never > /sys/kernel/mm/transparent_hugepage/enabled
[root@node1 ~]# echo never > /sys/kernel/mm/transparent_hugepage/defrag
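These echo commands only apply until the next reboot. One common way to make the setting persistent on CentOS 6 is to append the same commands to /etc/rc.local (a sketch, run as root):

cat >> /etc/rc.local <<'EOF'
# disable transparent hugepages for MongoDB
if test -f /sys/kernel/mm/transparent_hugepage/enabled; then
  echo never > /sys/kernel/mm/transparent_hugepage/enabled
fi
if test -f /sys/kernel/mm/transparent_hugepage/defrag; then
  echo never > /sys/kernel/mm/transparent_hugepage/defrag
fi
EOF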
[mongodb@node1 ~]$ netstat -ntpl | grep mongo | awk '{print $NF}' | awk -F '/' '{print $1}' | xargs kill
[mongodb@node1 ~]$ mongod -f /data/config/shard1.conf
[mongodb@node1 ~]$ mongod -f /data/config/shard2.conf
[mongodb@node1 ~]$ mongod -f /data/config/shard3.conf
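Killing by port number works, but mongod also has a --shutdown flag that performs a clean shutdown using the same config file, which is gentler on the data files; for example:

mongod -f /data/config/shard1.conf --shutdown
mongod -f /data/config/shard2.conf --shutdown
mongod -f /data/config/shard3.conf --shutdown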
[mongodb@node1 config]$ mongo --port 10001
MongoDB shell version: 3.2.3
connecting to: 127.0.0.1:10001/test
> use admin
switched to db admin
> config = { _id:"shard1_zxl", members:[
... {_id:0,host:"192.168.75.128:10001"},
... {_id:1,host:"192.168.75.129:10001"},
... {_id:2,host:"192.168.75.130:10001",arbiterOnly:true}
... ]
... }
{
        "_id" : "shard1_zxl",
        "members" : [
                {
                        "_id" : 0,
                        "host" : "192.168.75.128:10001"
                },
                {
                        "_id" : 1,
                        "host" : "192.168.75.129:10001"
                },
                {
                        "_id" : 2,
                        "host" : "192.168.75.130:10001",
                        "arbiterOnly" : true
                }
        ]
}
> rs.initiate(con
config    connect(    connectionURLTheSame(    constructor
> rs.initiate(config)
{ "ok" : 1 }
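Once rs.initiate() returns ok, the members hold an election; a quick way to confirm that a PRIMARY, a SECONDARY, and the arbiter have come up is to print the member states with rs.status(), e.g.:

mongo --port 10001 --eval 'rs.status().members.forEach(function(m){ print(m.name, m.stateStr) })'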
[mongodb@node2 config]$ mongo --port 10002
MongoDB shell version: 3.2.3
connecting to: 127.0.0.1:10002/test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
        http://docs.mongodb.org/
Questions? Try the support group
        http://groups.google.com/group/mongodb-user
> use admin
switched to db admin
> config = { _id:"shard2_zxl", members:[
... {_id:0,host:"192.168.75.129:10002"},
... {_id:1,host:"192.168.75.130:10002"},
... {_id:2,host:"192.168.75.128:10002",arbiterOnly:true}
... ]
... }
{
        "_id" : "shard2_zxl",
        "members" : [
                {
                        "_id" : 0,
                        "host" : "192.168.75.129:10002"
                },
                {
                        "_id" : 1,
                        "host" : "192.168.75.130:10002"
                },
                {
                        "_id" : 2,
                        "host" : "192.168.75.128:10002",
                        "arbiterOnly" : true
                }
        ]
}
> rs.initiate(config)
{ "ok" : 1 }
[mongodb@node3 config]$ mongo --port 10003
MongoDB shell version: 3.2.3
connecting to: 127.0.0.1:10003/test
Welcome to the MongoDB shell.
For interactive help, type "help".
For more comprehensive documentation, see
        http://docs.mongodb.org/
Questions? Try the support group
        http://groups.google.com/group/mongodb-user
> use admin
switched to db admin
> config = {_id:"shard3_zxl", members:[
... {_id:0,host:"192.168.75.130:10003"},
... {_id:1,host:"192.168.75.128:10003"},
... {_id:2,host:"192.168.75.129:10003",arbiterOnly:true}
... ]
... }
{
        "_id" : "shard3_zxl",
        "members" : [
                {
                        "_id" : 0,
                        "host" : "192.168.75.130:10003"
                },
                {
                        "_id" : 1,
                        "host" : "192.168.75.128:10003"
                },
                {
                        "_id" : 2,
                        "host" : "192.168.75.129:10003",
                        "arbiterOnly" : true
                }
        ]
}
> rs.initiate(config)
{ "ok" : 1 }
[mongodb@node1 logs]$ mongod -f /data/config/configsvr.conf
about to fork child process, waiting until server is ready for connections.
forked process: 6317
child process started successfully, parent exiting
[mongodb@node1 logs]$ mongos -f /data/config/mongos.conf
about to fork child process, waiting until server is ready for connections.
forked process: 6345
child process started successfully, parent exiting
[mongodb@node1 config]$ mongo --port 10005
MongoDB shell version: 3.2.3
connecting to: 127.0.0.1:10005/test
mongos> use admin
switched to db admin
mongos> db.runCommand({addshard:"shard1_zxl/192.168.75.128:10001,192.168.75.129:10001,192.168.75.130:10001"});
{ "shardAdded" : "shard1_zxl", "ok" : 1 }
mongos> db.runCommand({addshard:"shard2_zxl/192.168.75.128:10002,192.168.75.129:10002,192.168.75.130:10002"});
{ "shardAdded" : "shard2_zxl", "ok" : 1 }
mongos> db.runCommand({addshard:"shard3_zxl/192.168.75.128:10003,192.168.75.129:10003,192.168.75.130:10003"});
{ "shardAdded" : "shard3_zxl", "ok" : 1 }
#db.runCommand({addshard:"shard1_zxl/192.168.33.131:10001,192.168.33.132:10001,192.168.33.136:10001"});
#db.runCommand({addshard:"shard2_zxl/192.168.33.131:10002,192.168.33.132:10002,192.168.33.136:10002"});
#db.runCommand({addshard:"shard3_zxl/192.168.33.131:10003,192.168.33.132:10003,192.168.33.136:10003"});
Note: adjust the replica set names and IP addresses in the commands above to match your own environment before running them.
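Adding the shards does not distribute any data by itself; sharding still has to be enabled per database and per collection. A hypothetical example (the testdb database, users collection, and hashed _id shard key are made-up names, not part of the original setup):

mongo --port 10005 --eval 'sh.enableSharding("testdb"); sh.shardCollection("testdb.users", { _id: "hashed" })'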
mongos> sh.status()
--- Sharding Status ---
  sharding version: {
        "_id" : 1,
        "minCompatibleVersion" : 5,
        "currentVersion" : 6,
        "clusterId" : ObjectId("56de6f4176b47beaa9c75e9d")
}
  shards:
        {  "_id" : "shard1_zxl",  "host" : "shard1_zxl/192.168.75.128:10001,192.168.75.129:10001" }
        {  "_id" : "shard2_zxl",  "host" : "shard2_zxl/192.168.75.129:10002,192.168.75.130:10002" }
        {  "_id" : "shard3_zxl",  "host" : "shard3_zxl/192.168.75.128:10003,192.168.75.130:10003" }
  active mongoses:
        "3.2.3" : 3
  balancer:
        Currently enabled:  yes
        Currently running:  no
        Failed balancer rounds in last 5 attempts:  0
        Migration Results for the last 24 hours:
                No recent migrations
  databases:
mongos> db.runCommand( { listshards : 1 } )
{
        "shards" : [
                {