Building a Ceph Distributed Storage Cluster
Lab environment:
| Hostname | CPU cores | System disk | IP address (NAT) | OS |
|---|---|---|---|---|
| ceph01 | 1*2 | 40GB | 192.168.200.20 | CentOS 7 |
| ceph02 | 1*2 | 40GB | 192.168.200.30 | CentOS 7 |
| ceph03 | 1*2 | 40GB | 192.168.200.40 | CentOS 7 |
Note: the steps below target different servers; check which host each block applies to before running it.
Environment Setup
ceph01 & ceph02 & ceph03
- Map IPs to hostnames

```
[root@ceph01]# echo "192.168.200.20 ceph01" >> /etc/hosts
[root@ceph01]# echo "192.168.200.30 ceph02" >> /etc/hosts
[root@ceph01]# echo "192.168.200.40 ceph03" >> /etc/hosts
```

- Disable SELinux

```
[root@ceph01]# sed -i 's/SELINUX=.*/SELINUX=disabled/g' /etc/selinux/config
[root@ceph01]# setenforce 0
[root@ceph01]# getenforce
```

- Disable the firewall

```
[root@ceph01]# iptables -F
[root@ceph01]# iptables -X
[root@ceph01]# iptables -Z
[root@ceph01]# systemctl stop firewalld
[root@ceph01]# systemctl disable firewalld.service
```

Repeat all of the above on ceph02 and ceph03.
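To confirm the three steps took effect on every node, a quick sanity check such as the following can be run; this is a sketch, with the expected results noted in the comments:

```
# Name resolution should now work for all three hosts
[root@ceph01]# for h in ceph01 ceph02 ceph03; do ping -c 1 $h; done
# SELinux should report Permissive now (Disabled after a reboot)
[root@ceph01]# getenforce
# firewalld should report inactive
[root@ceph01]# systemctl is-active firewalld
```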
ceph01
1. Set up passwordless SSH from ceph01 to ceph02 and ceph03 (verified together with NTP after step 3)

```
[root@ceph01]# ssh-keygen
[root@ceph01]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@ceph02
[root@ceph01]# ssh-copy-id -i /root/.ssh/id_rsa.pub root@ceph03
```
2. Install the time-synchronization packages

```
[root@ceph01]# yum install ntp ntpdate -y
```
3. Configure NTP: comment out the default upstream servers and have this host serve time from its local clock

```
[root@ceph01]# vi /etc/ntp.conf
```
Add the following two lines:

```
server 127.127.1.0
fudge 127.127.1.0 stratum 10
```
```
[root@ceph01]# systemctl start ntpd.service
[root@ceph01]# systemctl enable ntpd.service
```
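With ntpd running, both step 1 and the time service can be checked from ceph01; a minimal sketch:

```
# Passwordless SSH should return each remote hostname without a password prompt
[root@ceph01]# for h in ceph02 ceph03; do ssh $h hostname; done
# The local clock (127.127.1.0) should appear in the peer list as LOCAL(0)
[root@ceph01]# ntpq -p
```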
ceph02 & ceph03
- Sync time from ceph01

```
[root@ceph02]# yum install ntp ntpdate -y
[root@ceph02]# ntpdate 192.168.200.20
[root@ceph02]# systemctl enable ntpd.service
[root@ceph02]# systemctl status ntpd.service
```

Repeat the above on ceph03.
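Note that `ntpdate` performs only a one-shot sync, while the stock `/etc/ntp.conf` still points at the public pool servers, which this offline lab cannot reach. A minimal sketch (an addition, not part of the original steps) to keep ntpd on ceph02 and ceph03 tracking ceph01:

```
# Assumption: make ceph01 the only time source, then restart ntpd
[root@ceph02]# sed -i 's/^server /#server /' /etc/ntp.conf
[root@ceph02]# echo "server 192.168.200.20" >> /etc/ntp.conf
[root@ceph02]# systemctl restart ntpd.service
```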
ceph01
- Install FTP

```
[root@ceph01]# yum install vsftpd -y
```

- Set the anonymous-access root for FTP

```
[root@ceph01]# echo "anon_root=/files/" >> /etc/vsftpd/vsftpd.conf
[root@ceph01]# systemctl restart vsftpd
[root@ceph01]# systemctl enable vsftpd
```

- Configure the yum source (attach the CentOS installation disc to the virtual machine first)

```
[root@ceph01]# mkdir -p /files/centos
[root@ceph01]# mount /dev/cdrom /mnt
[root@ceph01]# cp -rfv /mnt/* /files/centos
[root@ceph01]# umount /mnt
```

Swap XianDian-IaaS-v2.0-20170420.iso into the virtual drive, then:

```
[root@ceph01]# mount /dev/cdrom /mnt
[root@ceph01]# cp -rfv /mnt/* /files
[root@ceph01]# umount /mnt
```

- Install the Python packages required by ceph-deploy

```
[root@ceph01]# rpm -ivh /files/iaas-repo/base/python-backports-1.0-8.el7.x86_64.rpm
[root@ceph01]# rpm -ivh /files/iaas-repo/base/python-backports-ssl_match_hostname-3.4.0.2-4.el7.noarch.rpm
[root@ceph01]# rpm -ivh /files/iaas-repo/base/python-setuptools-0.9.8-4.el7.noarch.rpm
```
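Since the repos configured next depend on this FTP export, it is worth a quick anonymous-access check first (a hypothetical verification, not in the original steps):

```
# Both exported trees should return a directory listing over anonymous FTP
[root@ceph01]# curl -s ftp://192.168.200.20/centos/ | head -5
[root@ceph01]# curl -s ftp://192.168.200.20/iaas-repo/ | head -5
```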
ceph01 & ceph02 & ceph03
```
[root@ceph01]# cd /etc/yum.repos.d
[root@ceph01]# mkdir backup
[root@ceph01]# mv C* backup/
[root@ceph01]# vi local.repo
```

Add content along the following lines (the baseurl values assume the FTP tree exported above; adjust the stanza names to match your media):

```
[centos]
name=centos
baseurl=ftp://192.168.200.20/centos
gpgcheck=0
enabled=1

[iaas]
name=iaas
baseurl=ftp://192.168.200.20/iaas-repo
gpgcheck=0
enabled=1
```

```
[root@ceph01]# yum clean all
[root@ceph01]# yum repolist
```
Installing and Configuring Ceph
ceph01
- Install ceph-deploy

```
[root@ceph01]# yum install ceph-deploy -y
[root@ceph01]# mkdir /etc/ceph
[root@ceph01]# cd /etc/ceph
```

- Create a new cluster

```
[root@ceph01]# ceph-deploy new ceph01
```

- Install Ceph on all nodes

```
[root@ceph01]# ceph-deploy install ceph01 ceph02 ceph03
```

- Check the Ceph version

```
[root@ceph01]# ceph -v
```

- Create the first Ceph monitor

```
[root@ceph01]# ceph-deploy --overwrite-conf mon create-initial
```

- Check the cluster status

```
[root@ceph01]# ceph -s
```

- List the available disks on ceph01

```
[root@ceph01]# ceph-deploy disk list ceph01
```

- Create the OSD directory and set its permissions (an equivalent one-liner for all three nodes follows the per-host blocks)

```
[root@ceph01]# mkdir -p /opt/osd1
[root@ceph01]# chmod 777 /opt/osd1
```
ceph02
```
[root@ceph02]# mkdir -p /opt/osd2
[root@ceph02]# chmod 777 /opt/osd2
```
ceph03
```
[root@ceph03]# mkdir -p /opt/osd3
[root@ceph03]# chmod 777 /opt/osd3
```
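Because passwordless SSH from ceph01 is already in place, the three per-host directory steps can also be driven from ceph01 alone; a sketch:

```
# Equivalent one-liner run from ceph01 over the passwordless SSH set up earlier
[root@ceph01]# for i in 1 2 3; do ssh ceph0$i "mkdir -p /opt/osd$i && chmod 777 /opt/osd$i"; done
```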
ceph01
Create the OSDs

```
[root@ceph01]# ceph-deploy osd prepare ceph01:/opt/osd1 ceph02:/opt/osd2 ceph03:/opt/osd3
```

Activate the OSDs

```
[root@ceph01]# ceph-deploy osd activate ceph01:/opt/osd1 ceph02:/opt/osd2 ceph03:/opt/osd3
```

Check the cluster status; health showing HEALTH_OK means success:

```
[root@ceph01]# ceph -s
    cluster 147354a2-e174-4ea6-8464-58f1839fbd2f
     health HEALTH_OK
     monmap e1: 1 mons at {ceph01=192.168.200.20:6789/0}
            election epoch 2, quorum 0 ceph01
     osdmap e13: 3 osds: 3 up, 3 in
      pgmap v19: 64 pgs, 1 pools, 0 bytes data, 0 objects
            25907 MB used, 55365 MB / 81272 MB avail
                  64 active+clean
```

Grant admin access to the other nodes, so any of them can manage the cluster if ceph01 is lost:
```
[root@ceph01]# ceph-deploy admin ceph0{1,2,3}
[root@ceph01]# chmod +r /etc/ceph/ceph.client.admin.keyring
```
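With the keyring distributed, the cluster can be queried from any node; on ceph02 and ceph03 the keyring may need the same read permission (an assumption mirroring the chmod above):

```
# Hypothetical check: make the keyring readable on ceph02, then query the cluster from there
[root@ceph01]# ssh ceph02 "chmod +r /etc/ceph/ceph.client.admin.keyring && ceph -s"
```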
Ceph Operations and Maintenance
ceph01 & ceph02 & ceph03
- Check the Ceph installation status

```
[root@ceph01]# ceph status
```

- Watch cluster health events in real time

```
[root@ceph01]# ceph -w
```

- Check the Ceph monitor quorum status

```
[root@ceph01]# ceph quorum_status --format json-pretty
```

- Dump the monitor map

```
[root@ceph01]# ceph mon dump
```

- Check cluster space usage

```
[root@ceph01]# ceph df
```

- Check monitor, OSD, and PG status

```
[root@ceph01]# ceph mon stat
[root@ceph01]# ceph osd stat
[root@ceph01]# ceph pg stat
```

- Dump the placement group map

```
[root@ceph01]# ceph pg dump
```

- List the storage pools

```
[root@ceph01]# ceph osd lspools
```

- View the OSD CRUSH tree

```
[root@ceph01]# ceph osd tree
```

- List the cluster's authentication keys

```
[root@ceph01]# ceph auth list
```
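As a final end-to-end check, a throwaway pool can be created and an object written and read back. This is a sketch, not part of the original runbook; the pool name `testpool` and the PG count of 64 are arbitrary choices:

```
# Create a scratch pool with 64 placement groups
[root@ceph01]# ceph osd pool create testpool 64
# Write one object into the pool, then list and read it back
[root@ceph01]# echo hello > /tmp/obj.txt
[root@ceph01]# rados put obj1 /tmp/obj.txt --pool=testpool
[root@ceph01]# rados ls --pool=testpool          # expect: obj1
[root@ceph01]# rados get obj1 /tmp/obj.out --pool=testpool
# Clean up the scratch pool (the name is given twice as a safety measure)
[root@ceph01]# ceph osd pool delete testpool testpool --yes-i-really-really-mean-it
```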

