Manually Deploying a Multi-Monitor Ceph Cluster on CentOS 7


Node || IP address
monmds01 192.168.0.160
monmds02 192.168.0.161
monmds03 192.168.0.162
storage01 192.168.0.163
storage02 192.168.0.164
storage03 192.168.0.165
|| First Monitor
1- Hostname resolution
# vim /etc/hosts
192.168.0.160 monmds01
192.168.0.161 monmds02
192.168.0.162 monmds03
192.168.0.163 storage01
192.168.0.164 storage02
192.168.0.165 storage03
Copy this file to the other five nodes; a sketch follows below.
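The same hosts file is needed on every node. A minimal sketch for pushing it out with scp, assuming root SSH access from monmds01 to the other nodes (adjust to your environment):
# for h in monmds02 monmds03 storage01 storage02 storage03; do scp /etc/hosts $h:/etc/hosts; done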
2- Generate the cluster ID (fsid)
# uuidgen
ce492d6f-f80e-4c8d-bf13-d6f319163be3
3- Main configuration file
# vim /etc/ceph/ceph.conf
[global]
fsid = ce492d6f-f80e-4c8d-bf13-d6f319163be3
mon initial members = monmds01,monmds02,monmds03
mon host = 192.168.0.160,192.168.0.161,192.168.0.162
public network = 192.168.0.0/24
auth cluster required = cephx
auth service required = cephx
auth client required = cephx
osd journal size = 1024
filestore xattr use omap = true
osd pool default size = 2
osd pool default min size = 1
osd crush chooseleaf type = 1
osd_mkfs_type = xfs
max mds = 5
mds max file size = 100000000000000
mds cache size = 1000000
mon osd down out interval = 900
cluster_network = 192.168.0.0/24
[mon]
mon clock drift allowed = .50
4- Create keyrings
# ceph-authtool --create-keyring /tmp/ceph.mon.keyring \
> --gen-key -n mon. --cap mon 'allow *'
# ceph-authtool --create-keyring /etc/ceph/ceph.client.admin.keyring \
> --gen-key -n client.admin --set-uid=0 --cap mon 'allow *' \
> --cap osd 'allow *' --cap mds 'allow'
# ceph-authtool /tmp/ceph.mon.keyring \
> --import-keyring /etc/ceph/ceph.client.admin.keyring
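Optionally, list the keyring to confirm that both the mon. and client.admin keys were imported before initializing the monitor:
# ceph-authtool -l /tmp/ceph.mon.keyring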
5- Create the mon directory and initialize the monitor
# mkdir -p /var/lib/ceph/mon/ceph-monmds01
# ceph-mon --mkfs -i monmds01 --keyring /tmp/ceph.mon.keyring
6- Create init marker files
# touch /var/lib/ceph/mon/ceph-monmds01/done
# touch /var/lib/ceph/mon/ceph-monmds01/sysvinit
Note: without the sysvinit file, starting the service fails with an error like this:
# service ceph restart mon.monmds01
/etc/init.d/ceph: mon.monmds01 not found
(/etc/ceph/ceph.conf defines , /var/lib/ceph defines )
7- Start the monitor and check status
# service ceph start mon.monmds01
=== mon.monmds01 ===
Starting Ceph mon.monmds01 on monmds01...
Running as unit run-8653.service.
Starting ceph-create-keys on monmds01...
# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.monmds01.asok mon_status
{
"name": "monmds01",
"rank": 0,
"state": "probing",
"election_epoch": 0,
"quorum": [],
"outside_quorum": [
"monmds01"
],
"extra_probe_peers": [
"192.168.0.161:6789\/0",
"192.168.0.162:6789\/0"
],
"sync_provider": [],
"monmap": {
"epoch": 0,
"fsid": "ce492d6f-f80e-4c8d-bf13-d6f319163be3",
"modified": "0.000000",
"created": "0.000000",
"mons": [
"rank": 0,
"name": "monmds01",
"addr": "192.168.0.160:6789\/0"
},
{
"rank": 1,
"name": "monmds02",
"addr": "0.0.0.0:0\/1"
},
{
"rank": 2,
"name": "monmds03",
"addr": "0.0.0.0:0\/2"
}
]
}
}
8- Copy the Ceph files to the other monitors
# scp /etc/ceph/* monmds02:/etc/ceph/
# scp /tmp/ceph.mon.keyring monmds02:/tmp/
# scp /etc/ceph/* monmds03:/etc/ceph/
# scp /tmp/ceph.mon.keyring monmds03:/tmp/
9- Copy the Ceph files to the OSD nodes
# scp /etc/ceph/* storage01:/etc/ceph/
# scp /etc/ceph/* storage02:/etc/ceph/
# scp /etc/ceph/* storage03:/etc/ceph/
|| Second Monitor
1- Create the mon directory and initialize
# mkdir /var/lib/ceph/mon/ceph-monmds02
# ceph-mon --mkfs -i monmds02 --keyring /tmp/ceph.mon.keyring
2- Create init marker files
# touch /var/lib/ceph/mon/ceph-monmds02/done
# touch /var/lib/ceph/mon/ceph-monmds02/sysvinit
3- Start and check status
# service ceph start mon.monmds02
# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.monmds01.asok mon_status
{
"name": "monmds01",
"rank": 0,
"state": "electing",
"election_epoch": 3,
"quorum": [],
"outside_quorum": [],
"extra_probe_peers": [
"192.168.0.161:6789\/0",
"192.168.0.162:6789\/0"
],
"sync_provider": [],
"monmap": {
"epoch": 1,
"fsid": "ce492d6f-f80e-4c8d-bf13-d6f319163be3", "modified": "0.000000",
"created": "0.000000",
"mons": [
{
"rank": 0,
"name": "monmds01",
"addr": "192.168.0.160:6789\/0"
},
{
"rank": 1,
"name": "monmds02",
"addr": "192.168.0.161:6789\/0"
},
{
"rank": 2,
"name": "monmds03",
"addr": "0.0.0.0:0\/2"
}
]
}
}
|| Third Monitor
1- Create the mon directory and initialize
# mkdir /var/lib/ceph/mon/ceph-monmds03
# ceph-mon --mkfs -i monmds03 --keyring /tmp/ceph.mon.keyring
2- Create init marker files
# touch /var/lib/ceph/mon/ceph-monmds03/done
# touch /var/lib/ceph/mon/ceph-monmds03/sysvinit
3- Start and check status
# service ceph start mon.monmds03
# ceph --cluster=ceph --admin-daemon /var/run/ceph/ceph-mon.monmds01.asok mon_status
{
"name": "monmds01",
"rank": 0,
"state": "electing",
"election_epoch": 5,
"quorum": [],
"outside_quorum": [],
"extra_probe_peers": [
"192.168.0.161:6789\/0",
"192.168.0.162:6789\/0"
],
"sync_provider": [],
"monmap": {
"epoch": 2,
"fsid": "ce492d6f-f80e-4c8d-bf13-d6f319163be3", "modified": "2016-03-16 14:41:28.830748", "created": "0.000000",
"mons": [
{
"rank": 0,
"name": "monmds01",
"addr": "192.168.0.160:6789\/0"
},
{
"rank": 1,
"name": "monmds02",
"addr": "192.168.0.161:6789\/0"
},
{
"rank": 2,
"name": "monmds03",
"addr": "192.168.0.162:6789\/0"
}
]
}
}
4- Check the monitor cluster status
# ceph -s
cluster ce492d6f-f80e-4c8d-bf13-d6f319163be3
health HEALTH_ERR
64 pgs stuck inactive
64 pgs stuck unclean
no osds
monmap e2: 3 mons at {monmds01=192.168.0.160:6789/0, monmds02=192.168.0.161:6789/0,
monmds03=192.168.0.162:6789/0}
election epoch 6, quorum 0,1,2 monmds01,monmds02,monmds03
osdmap e1: 0 osds: 0 up, 0 in
pgmap v2: 64 pgs, 1 pools, 0 bytes data, 0 objects
0 kB used, 0 kB / 0 kB avail
64 creating
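Quorum membership can also be checked directly from any monitor; the quorum_names field should list all three monitors:
# ceph quorum_status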
|| First OSD
1- Create the bootstrap-osd keyring
# ceph-authtool -C /var/lib/ceph/bootstrap-osd/ceph.keyring
2- Create the OSD
# ceph osd create
3- Create the OSD data directory
# mkdir /var/lib/ceph/osd/ceph-0
4- Attach a cloud disk to the VM from the dashboard, then partition, format, and mount it in the guest
# fdisk /dev/vdb
# mkfs.xfs /dev/vdb1
# vim /etc/fstab
/dev/vdb1 /var/lib/ceph/osd/ceph-0 xfs defaults,_netdev 0 0
Note: because this is a network-attached cloud disk, add the _netdev option so mounting waits for the network and does not block boot. (A non-interactive partitioning alternative is sketched after this step.)
# mount -a
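If you prefer a non-interactive alternative to the fdisk dialogue above, a sketch using parted (this assumes /dev/vdb is blank and may be relabeled):
# parted -s /dev/vdb mklabel gpt mkpart primary xfs 0% 100%
# mkfs.xfs /dev/vdb1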
5- Initialize the OSD data directory
# ceph-osd -i 0 --mkfs --mkkey
6- Register the OSD key
# ceph auth add osd.0 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-0/keyring
7- Add the OSD to the CRUSH map
# ceph osd crush add-bucket storage01 host
# ceph osd crush move storage01 root=default
# ceph osd crush add osd.0 1.0 host=storage01
8- Create the init marker file
# touch /var/lib/ceph/osd/ceph-0/sysvinit
9- Start the OSD and check status
# service ceph start osd.0
=== osd.0 ===
create-or-move updated item name 'osd.0'
weight 0.59 at location {host=storage01,root=default} to crush map
Starting Ceph osd.0 on storage01...
Running as unit run-5173.service.
# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 1.00000 root default
-2 1.00000 host storage01
0 1.00000 osd.0 up 1.00000 1.00000
10- Copy the bootstrap-osd ceph.keyring to the other two OSD nodes
# scp /var/lib/ceph/bootstrap-osd/ceph.keyring storage02:/var/lib/ceph/bootstrap-osd/
# scp /var/lib/ceph/bootstrap-osd/ceph.keyring storage03:/var/lib/ceph/bootstrap-osd/
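The same procedure is repeated on storage02 and storage03 with only the OSD id and hostname changing (full transcripts follow). As a hedged consolidation, the per-node sequence looks roughly like this, assuming /dev/vdb1 has already been partitioned, formatted, and added to fstab as above:
# OSD_ID=$(ceph osd create)
# mkdir -p /var/lib/ceph/osd/ceph-$OSD_ID
# mount /dev/vdb1 /var/lib/ceph/osd/ceph-$OSD_ID
# ceph-osd -i $OSD_ID --mkfs --mkkey
# ceph auth add osd.$OSD_ID osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-$OSD_ID/keyring
# ceph osd crush add-bucket $(hostname -s) host
# ceph osd crush move $(hostname -s) root=default
# ceph osd crush add osd.$OSD_ID 1.0 host=$(hostname -s)
# touch /var/lib/ceph/osd/ceph-$OSD_ID/sysvinit
# service ceph start osd.$OSD_ID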
|| Second OSD
1- Create the OSD
# ceph osd create
1
2- Create the OSD data directory
# mkdir /var/lib/ceph/osd/ceph-1
3- Attach a cloud disk to the VM from the dashboard, then partition, format, and mount it in the guest
# fdisk /dev/vdb
# mkfs.xfs /dev/vdb1
# vim /etc/fstab
/dev/vdb1 /var/lib/ceph/osd/ceph-1 xfs defaults,_netdev 0 0
# mount -a
4- Initialize the OSD data directory
# ceph-osd -i 1 --mkfs --mkkey
5- Register the OSD key
# ceph auth add osd.1 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-1/keyring
6- Add the OSD to the CRUSH map
# ceph osd crush add-bucket storage02 host
# ceph osd crush move storage02 root=default
# ceph osd crush add osd.1 1.0 host=storage02
7- Create the init marker file
# touch /var/lib/ceph/osd/ceph-1/sysvinit
8- Start the OSD and check status
# service ceph start osd.1
=== osd.1 ===
create-or-move updated item name 'osd.1' weight 0.59
at location {host=storage02,root=default} to crush map
Starting Ceph osd.1 on storage02...
Running as unit run-5280.service.
# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 2.00000 root default
-2 1.00000 host storage01
0 1.00000 osd.0 up 1.00000 1.00000
-3 1.00000 host storage02
1 1.00000 osd.1 up 1.00000 1.00000
|| Third OSD
1- Create the OSD
# ceph osd create
2
2- Create the OSD data directory
# mkdir /var/lib/ceph/osd/ceph-2
3- Attach a cloud disk to the VM from the dashboard, then partition, format, and mount it in the guest
# fdisk /dev/vdb
# mkfs.xfs /dev/vdb1
# vim /etc/fstab
/dev/vdb1 /var/lib/ceph/osd/ceph-2 xfs defaults,_netdev 0 0
# mount -a
4- Initialize the OSD data directory
# ceph-osd -i 2 --mkfs --mkkey
5- Register the OSD key
# ceph auth add osd.2 osd 'allow *' mon 'allow profile osd' -i /var/lib/ceph/osd/ceph-2/keyring
6- Add the OSD to the CRUSH map
# ceph osd crush add-bucket storage03 host
# ceph osd crush move storage03 root=default
# ceph osd crush add osd.2 1.0 host=storage03
7- Create the init marker file
# touch /var/lib/ceph/osd/ceph-2/sysvinit
8- Start the OSD and check status
# service ceph start osd.2
=== osd.2 ===
create-or-move updated item name 'osd.2'
weight 0.59 at location {host=storage03,root=default} to crush map
Starting Ceph osd.2 on storage03...
Running as unit run-5523.service.
# ceph osd tree
ID WEIGHT TYPE NAME UP/DOWN REWEIGHT PRIMARY-AFFINITY
-1 3.00000 root default
-2 1.00000 host storage01
0 1.00000 osd.0 up 1.00000 1.00000
-3 1.00000 host storage02
1 1.00000 osd.1 up 1.00000 1.00000
-4 1.00000 host storage03
2 1.00000 osd.2 up 1.00000 1.00000
# ceph -s
cluster ce492d6f-f80e-4c8d-bf13-d6f319163be3
health HEALTH_OK
monmap e2: 3 mons at
{monmds01=192.168.0.160:6789/0,monmds02=192.168.0.161:6789/0,
monmds03=192.168.0.162:6789/0}
election epoch 6, quorum 0,1,2 monmds01,monmds02,monmds03
osdmap e20: 3 osds: 3 up, 3 in
pgmap v40: 64 pgs, 1 pools, 0 bytes data, 0 objects
3171 MB used, 1796 GB / 1799 GB avail
64 active+clean
|| Clock Skew
1- Cause
The maximum clock drift allowed in ceph.conf is only 0.5 seconds, which frequently triggers health warnings:
# ceph -s
health HEALTH_WARN
clock skew detected on mon.monmds02
Monitor clock skew detected
2- Increase the drift threshold
# vim /etc/ceph/ceph.conf
mon clock drift allowed = 1.5
3- Restart the services on each node (a clock-sync alternative is sketched after this step)
# service ceph restart mon.monmds01
# service ceph restart mon.monmds02
# service ceph restart mon.monmds03
# service ceph restart osd.0
# service ceph restart osd.1
# service ceph restart osd.2
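Raising the threshold only masks the symptom; keeping the clocks in sync is the cleaner fix. A minimal sketch assuming the stock chrony package on CentOS 7 and a reachable NTP source, run on every node:
# yum install -y chrony
# systemctl enable chronyd
# systemctl start chronyd
# chronyc sources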
|| First MDS [on monmds01]
1- Create the MDS directory
# mkdir /var/lib/ceph/mds/ceph-monmds01
2- Create the mds.monmds01 user in the Ceph auth database and grant capabilities
# ceph --cluster ceph --name client.bootstrap-mds \
> --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring \
> auth get-or-create mds.monmds01 osd 'allow rwx' mds 'allow' mon 'allow profile mds' \
> -o /var/lib/ceph/mds/ceph-monmds01/keyring
3- Create init marker files
# touch /var/lib/ceph/mds/ceph-monmds01/done
# touch /var/lib/ceph/mds/ceph-monmds01/sysvinit
4- Start the MDS
# service ceph start mds.monmds01
=== mds.monmds01 ===
Starting Ceph mds.monmds01 on monmds01...
Running as unit run-5653.service.
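Optionally, confirm the daemon has registered with the monitors (before a filesystem exists it will show up as a standby; exact output depends on the Ceph release):
# ceph mds stat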
|| Second MDS [on monmds02]
1- Create the MDS directory
# mkdir /var/lib/ceph/mds/ceph-monmds02
2- Create the mds.monmds02 user in the Ceph auth database and grant capabilities
# ceph --cluster ceph --name client.bootstrap-mds \
> --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring \
> auth get-or-create mds.monmds02 osd 'allow rwx' mds 'allow' mon 'allow profile mds' \
> -o /var/lib/ceph/mds/ceph-monmds02/keyring
3- Create init marker files
# touch /var/lib/ceph/mds/ceph-monmds02/sysvinit
# touch /var/lib/ceph/mds/ceph-monmds02/done
4- Start the MDS service
# service ceph start mds.monmds02
=== mds.monmds02 ===
Starting Ceph mds.monmds02 on monmds02...
Running as unit run-5932.service.
|| Third MDS [on monmds03]
1- Create the MDS directory
# mkdir /var/lib/ceph/mds/ceph-monmds03
2- Create the mds.monmds03 user in the Ceph auth database and grant capabilities
# ceph --cluster ceph --name client.bootstrap-mds \
> --keyring /var/lib/ceph/bootstrap-mds/ceph.keyring \
> auth get-or-create mds.monmds03 osd 'allow rwx' mds 'allow' mon 'allow profile mds' \
> -o /var/lib/ceph/mds/ceph-monmds03/keyring
3- Create init marker files
# touch /var/lib/ceph/mds/ceph-monmds03/sysvinit
# touch /var/lib/ceph/mds/ceph-monmds03/done
4- Start the MDS service
# service ceph start mds.monmds03
=== mds.monmds03 ===
Starting Ceph mds.monmds03 on monmds03...
Running as unit run-8226.service.
|| CephFS
1- Create the pools
# ceph osd pool create cephfs_data 150
# ceph osd pool create cephfs_metadata 150
Note: pg_num 150 = 100 * number of OSDs / osd pool default size = 100 * 3 / 2 (a quick check is sketched below).
# ceph osd lspools
0 rbd,1 cephfs_data,2 cephfs_metadata,
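The rule of thumb above (roughly 100 placement groups per OSD, divided by the replica count) can be checked with plain shell arithmetic:
# echo $(( 100 * 3 / 2 ))
150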
2- Create the filesystem
# ceph fs new cephfs cephfs_metadata cephfs_data
3- Check status
# ceph -s
cluster ce492d6f-f80e-4c8d-bf13-d6f319163be3
health HEALTH_OK
monmap e2: 3 mons at
{monmds01=192.168.0.160:6789/0,
monmds02=192.168.0.161:6789/0,
monmds03=192.168.0.162:6789/0}
election epoch 18, quorum 0,1,2 monmds01,monmds02,monmds03
mdsmap e10: 3/3/5 up
{0=monmds03=up:active,1=monmds01=up:active,2=monmds02=up:active}
osdmap e46: 3 osds: 3 up, 3 in
pgmap v125: 364 pgs, 3 pools, 4230 bytes data, 54 objects
3177 MB used, 1796 GB / 1799 GB avail
364 active+clean
|| Mounting
1- Edit fstab on the client node (a secretfile-based alternative is sketched after this step)
# vim /etc/fstab
monmds01,monmds02,monmds03:/ /mnt/three ceph name=admin,secret=AQCqwuhW0cSlDRAACKASVHJ//eVPUl9Km17FrA==,noatime,_netdev 0 2
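Embedding the key directly in fstab leaves it readable to anyone on the client. A hedged alternative: export the admin key on a monitor, copy it to the client (the "client" hostname and the /etc/ceph/admin.secret path are illustrative), and reference it with the secretfile option instead:
# ceph auth get-key client.admin -o /etc/ceph/admin.secret
# scp /etc/ceph/admin.secret client:/etc/ceph/
monmds01,monmds02,monmds03:/ /mnt/three ceph name=admin,secretfile=/etc/ceph/admin.secret,noatime,_netdev 0 2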
2- Add hostname resolution
# vim /etc/hosts
192.168.0.160 monmds01
192.168.0.161 monmds02
192.168.0.162 monmds03
3- Mount
# mkdir /mnt/three
# mount -a
# df -h
Filesystem Size Used Avail Use% Mounted on
/dev/vda1 50G 15G 33G 32% /
devtmpfs 3.9G 0 3.9G 0% /dev
tmpfs 3.9G 84K 3.9G 1% /dev/shm
tmpfs 3.9G 8.7M 3.9G 1% /run
tmpfs 3.9G 0 3.9G 0% /sys/fs/cgroup
tmpfs 799M 16K 799M 1% /run/user/42
tmpfs 799M 0 799M 0% /run/user/0
monmds01,monmds02,monmds03:/ 1.8T 3.2G 1.8T 1% /mnt/three
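A quick smoke test of the new mount (the file name is arbitrary):
# dd if=/dev/zero of=/mnt/three/testfile bs=1M count=10
# ls -lh /mnt/three/testfile
# rm -f /mnt/three/testfile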
