Introduction
Ceph is an open-source distributed storage system. It scales to petabytes of data, provides object storage, block storage and file storage, and is designed for high performance, high availability and scalability.
Recommended deployment network architecture diagram
Deployment
Deployment architecture diagram. This walkthrough deploys the jewel release.
Vagrantfile for the lab environment
lab1 acts both as the admin node and as a storage node, lab2 and lab3 are storage nodes only, and lab4 is the client node used for testing Ceph.
ENV["LC_ALL"] = "en_US.UTF-8"
Vagrant.configure("2") do |config|
(1..4).each do |i|
config.vm.define "lab#{i}" do |node|
node.vm.box = "centos-7.4-docker-17"
node.ssh.insert_key = false
node.vm.hostname = "lab#{i}"
node.vm.network "private_network", ip: "11.11.11.11#{i}"
node.vm.provision "shell",
inline: "echo hello from node #{i}"
node.vm.provider "virtualbox" do |v|
v.cpus = 3
v.customize ["modifyvm", :id, "--name", "lab#{i}", "--memory", "3096"]
file_to_disk = "lab#{i}_vdb.vdi"
unless File.exist?(file_to_disk)
v.customize ['createhd', '--filename', file_to_disk, '--size', 50 * 1024]
end
v.customize ['storageattach', :id, '--storagectl', 'IDE', '--port', 1, '--device', 0, '--type', 'hdd', '--medium', file_to_disk]
end
end
end
end
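With the Vagrantfile in place, the four VMs can be brought up and inspected as usual (a usage sketch; it assumes Vagrant with the VirtualBox provider is installed and the centos-7.4-docker-17 box is available locally):
vagrant up          # create and boot lab1-lab4
vagrant status      # confirm all four machines are running
vagrant ssh lab1    # log in to a node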
Configure the Aliyun Ceph repository
Run the following on every node:
cat >/etc/yum.repos.d/ceph.repo<<EOF
[ceph]
name=ceph
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/x86_64/
gpgcheck=0
priority=1
[ceph-noarch]
name=cephnoarch
baseurl=http://mirrors.aliyun.com/ceph/rpm-jewel/el7/noarch/
gpgcheck=0
priority=1
[ceph-source]
name=Ceph source packages
baseurl=http://mirrors.163.com/ceph/rpm-jewel/el7/SRPMS
enabled=0
gpgcheck=1
type=rpm-md
gpgkey=http://mirrors.163.com/ceph/keys/release.asc
priority=1
EOF
yum makecache
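Optionally, confirm that the new repositories were picked up:
yum repolist | grep -i ceph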
Install ceph-deploy on the admin node
On the lab1 node:
cat >/etc/yum.repos.d/ceph.repo<<EOF
[ceph-noarch]
name=Ceph noarch packages
baseurl=https://download.ceph.com/rpm-jewel/el7/noarch
enabled=1
gpgcheck=1
type=rpm-md
gpgkey=https://download.ceph.com/keys/release.asc
EOF
yum install -y ceph-deploy
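A quick sanity check that the tool is installed (optional):
ceph-deploy --version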
Configure the admin node's access to the other nodes
After the installation, the admin node must be able to log in to every node (including the test node) over SSH without a password, as a user that has passwordless sudo.
# On every node (lab1 to lab4), as root: create a ceph user with passwordless sudo
# and make sure password-based SSH logins are allowed
useradd ceph
echo 'ceph' | passwd --stdin ceph
echo "ceph ALL = (root) NOPASSWD:ALL" > /etc/sudoers.d/ceph
chmod 0440 /etc/sudoers.d/ceph
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/' /etc/ssh/sshd_config
systemctl reload sshd
# disable "Defaults requiretty" so sudo can be used over non-interactive SSH sessions
sed -i 's/^Defaults.*requiretty/#&/' /etc/sudoers

# On every node: make the hostnames resolvable
cat >>/etc/hosts<<EOF
11.11.11.111 lab1
11.11.11.112 lab2
11.11.11.113 lab3
11.11.11.114 lab4
EOF

# On lab1 (the admin node): generate an SSH key as the ceph user and copy it to every node
su - ceph
ssh-keygen
ssh-copy-id ceph@lab1
ssh-copy-id ceph@lab2
ssh-copy-id ceph@lab3
ssh-copy-id ceph@lab4
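Before continuing, it is worth verifying from lab1, as the ceph user, that every node can be reached without a password and that sudo works non-interactively (a small check; each command should print root without prompting):
for n in lab1 lab2 lab3 lab4; do ssh ceph@$n sudo whoami; done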
Create the cluster from the admin node
Run the following on lab1. The hostnames of the nodes must be set correctly (lab1, lab2, lab3), otherwise the deployment may fail.
# On lab1, as the ceph user: create a working directory and initialize the cluster
su - ceph
mkdir my-cluster
cd my-cluster
ceph-deploy new lab1
ls -l

# Append the network settings to the [global] section of the generated ceph.conf
[global]
...
public network = 11.11.11.0/24
cluster network = 11.11.11.0/24

# Install the Ceph packages (run this on each of lab1, lab2 and lab3)
yum install -y ceph ceph-radosgw

# Back in my-cluster on lab1: deploy the initial monitor and gather the keys,
# then distribute the configuration and admin keyring to the nodes
ceph-deploy mon create-initial
ls -l *.keyring
ceph-deploy admin lab1 lab2 lab3

# Create the OSDs. One option is to use the spare virtual disk on each node directly:
ceph-deploy osd create lab1:sdb lab2:sdb lab3:sdb

# Alternatively, use a directory as the OSD backend
# (create the directory on each of lab1, lab2 and lab3 first):
rm -rf /data/osd1
mkdir -pv /data/osd1
chmod 777 -R /data/osd1
chown ceph.ceph -R /data/osd1
ceph-deploy osd prepare lab1:/data/osd1 lab2:/data/osd1 lab3:/data/osd1
ceph-deploy osd activate lab1:/data/osd1 lab2:/data/osd1 lab3:/data/osd1

# Check the cluster status
ssh lab1 sudo ceph health
ssh lab1 sudo ceph -s
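Besides ceph health and ceph -s, the OSD tree is a quick way to confirm that all three OSDs came up (optional check):
ssh lab1 sudo ceph osd tree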
Cleaning up the cluster
If the deployment goes wrong and you want to start from scratch, wipe the nodes and the local keys (run on lab1 in the my-cluster directory), then redeploy:
ceph-deploy purge lab1 lab2 lab3
ceph-deploy purgedata lab1 lab2 lab3
ceph-deploy forgetkeys
rm ceph.*
Expanding the cluster
To improve availability:
- run a metadata server (MDS) on lab1, for use with CephFS later
- run additional monitors on lab2 and lab3
ceph-deploy mds create lab1
ceph-deploy mon add lab2
ceph-deploy mon add lab3
ssh lab1 sudo ceph -s
ceph quorum_status --format json-pretty
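A shorter way to confirm that lab2 and lab3 joined the monitor quorum (optional):
ceph mon stat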
Deploy RGW (Ceph Object Gateway)
RGW provides S3- and Swift-compatible interfaces on top of Ceph, so object storage can be used with standard S3/Swift command-line tools and SDKs.
ceph-deploy rgw create lab1
# civetweb listens on port 7480 by default; to use port 80 instead, add the
# following to ceph.conf on lab1, restart the gateway, then verify with curl
[client.rgw.lab1]
rgw_frontends = "civetweb port=80"
systemctl restart ceph-radosgw@rgw.lab1
curl -I http://11.11.11.111/
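If the [client.rgw.lab1] section is added to the copy of ceph.conf kept in the my-cluster directory on the admin node, rather than edited directly on lab1, it has to be pushed to the node before restarting the gateway (a sketch under that assumption):
ceph-deploy --overwrite-conf config push lab1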
Using Ceph storage
Application storage architecture diagram
Object storage
# On lab4: install the Ceph client packages
yum install -y ceph
# On lab1: push the config and admin keyring to lab4
ceph-deploy admin lab4

# On lab4: create a test pool, store an object, read it back, then clean up
echo 'hello ceph object storage' > testfile.txt
ceph osd pool create mytest 8
rados put test-object-1 testfile.txt --pool=mytest
rados -p mytest ls
rados get test-object-1 testfile.txt.1 --pool=mytest
cat testfile.txt.1
# show which placement group and OSDs the object maps to
ceph osd map mytest test-object-1
rados rm test-object-1 --pool=mytest
ceph osd pool rm mytest mytest --yes-i-really-really-mean-it
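While experimenting with rados put and rados rm, cluster- and pool-level usage can be watched with (optional):
ceph df
rados df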
Block storage
# On lab4: install the Ceph client packages (if not already installed)
yum install -y ceph
# On lab1: push the config and admin keyring to lab4
ceph-deploy admin lab4

# On lab4: create a 4 GB RBD image (only the layering feature, so the kernel client can map it)
rbd create foo --size 4096 --image-feature layering
rbd info foo
rados -p rbd ls

# Map the image, format it, mount it and write a test file
sudo rbd map foo --name client.admin
sudo mkfs.ext4 -m0 /dev/rbd/rbd/foo
sudo mkdir /mnt/ceph-block-device
sudo mount /dev/rbd/rbd/foo /mnt/ceph-block-device
cd /mnt/ceph-block-device
echo 'hello ceph block storage' > testfile.txt

# Clean up: unmount, unmap and remove the image
cd ~
sudo umount -lf /mnt/ceph-block-device
sudo rbd unmap foo
rbd remove foo
rados -p rbd ls
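While an image is mapped (between rbd map and rbd unmap above), the kernel mapping and its device path can be inspected with (optional check):
rbd showmapped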
S3 object storage
11.11.11.111 is the machine where RGW was installed.
# On lab4: install the Ceph client and the radosgw packages
yum install -y ceph ceph-radosgw
# On lab1: push the config and admin keyring to lab4
ceph-deploy admin lab4

# Create the pools used by RGW
ceph osd pool create .rgw 128 128
ceph osd pool create .rgw.root 128 128
ceph osd pool create .rgw.control 128 128
ceph osd pool create .rgw.gc 128 128
ceph osd pool create .rgw.buckets 128 128
ceph osd pool create .rgw.buckets.index 128 128
ceph osd pool create .rgw.buckets.extra 128 128
ceph osd pool create .log 128 128
ceph osd pool create .intent-log 128 128
ceph osd pool create .usage 128 128
ceph osd pool create .users 128 128
ceph osd pool create .users.email 128 128
ceph osd pool create .users.swift 128 128
ceph osd pool create .users.uid 128 128
rados lspools
# Verify that the gateway is reachable
curl -I http://11.11.11.111/

# Create an S3 user; note the access_key and secret_key printed in the output
radosgw-admin user create --uid=foo --display-name=foo --email=foo@foo.com
# Create an admin user and grant it extra capabilities
radosgw-admin user create --uid=admin --display-name=admin
radosgw-admin caps add --uid=admin --caps="users=*"
radosgw-admin caps add --uid=admin --caps="usage=read,write"

# On lab4: install and configure the s3cmd client, then edit $HOME/.s3cfg to point at the gateway
yum install -y s3cmd
s3cmd --configure
vim $HOME/.s3cfg
host_base = 11.11.11.111
host_bucket = 11.11.11.111/%(bucket)
use_https = False
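# Note: s3cmd --configure also asks for an Access Key and Secret Key. Use the access_key
# and secret_key pair printed by the "radosgw-admin user create --uid=foo" command above;
# if they were not saved, they can be shown again at any time with:
radosgw-admin user info --uid=foo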
# Create a bucket and exercise basic object operations
s3cmd mb s3://mybucket
s3cmd ls
echo 'hello ceph block storage s3' > hello.txt
s3cmd put hello.txt s3://mybucket
s3cmd ls s3://mybucket
cd /tmp
s3cmd get s3://mybucket/hello.txt
cd ~
s3cmd del -rf s3://mybucket/
s3cmd ls -r s3://mybucket
s3cmd mb s3://mybucket1
s3cmd rb s3://mybucket1
# Clean up: remove the users and delete the RGW pools
radosgw-admin user rm --uid=foo
radosgw-admin user rm --uid=admin
ceph osd pool delete .rgw .rgw --yes-i-really-really-mean-it
ceph osd pool delete .rgw.root .rgw.root --yes-i-really-really-mean-it
ceph osd pool delete .rgw.control .rgw.control --yes-i-really-really-mean-it
ceph osd pool delete .rgw.gc .rgw.gc --yes-i-really-really-mean-it
ceph osd pool delete .rgw.buckets .rgw.buckets --yes-i-really-really-mean-it
ceph osd pool delete .rgw.buckets.index .rgw.buckets.index --yes-i-really-really-mean-it
ceph osd pool delete .rgw.buckets.extra .rgw.buckets.extra --yes-i-really-really-mean-it
ceph osd pool delete .log .log --yes-i-really-really-mean-it
ceph osd pool delete .intent-log .intent-log --yes-i-really-really-mean-it
ceph osd pool delete .usage .usage --yes-i-really-really-mean-it
ceph osd pool delete .users .users --yes-i-really-really-mean-it
ceph osd pool delete .users.email .users.email --yes-i-really-really-mean-it
ceph osd pool delete .users.swift .users.swift --yes-i-really-really-mean-it
ceph osd pool delete .users.uid .users.uid --yes-i-really-really-mean-it
CephFS storage
# On lab4: install the Ceph client packages and ceph-fuse
yum install -y ceph ceph-fuse
# On lab1: push the config and admin keyring to lab4
ceph-deploy admin lab4

# Create the data and metadata pools, then create the filesystem
ceph osd pool create fs_data 128
ceph osd pool create fs_metadata 128
ceph osd lspools
ceph fs new cephfs fs_metadata fs_data
ceph fs ls
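# The kernel mount below authenticates as client.admin. The secret shown there is the
# admin key of this particular lab cluster; on any other cluster it can be read with:
ceph auth get-key client.admin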
# Mount CephFS with the kernel client
mkdir /mnt/mycephfs
mount -t ceph lab1:6789,lab2:6789,lab3:6789:/ /mnt/mycephfs -o name=admin,secret=AQBoclRaiilZJBAACLjqg2OUOOB/FNa20UJXYA==
df -h
cd /mnt/mycephfs
echo 'hello ceph CephFS' > hello.txt
cd ~
umount -lf /mnt/mycephfs
rm -rf /mnt/mycephfs

# Alternatively, mount CephFS with ceph-fuse (no kernel client needed)
mkdir /mnt/mycephfs
ceph-fuse -m lab1:6789 /mnt/mycephfs
df -h
cd /mnt/mycephfs
echo 'hello ceph CephFS' > hello.txt
cd ~
umount -lf /mnt/mycephfs
rm -rf /mnt/mycephfs
# Tear down: stop the MDS (run the systemctl commands on lab1), remove the filesystem
# and its pools, then start the MDS again
systemctl stop ceph-mds@lab1
ceph fs rm cephfs --yes-i-really-mean-it
ceph osd pool delete fs_data fs_data --yes-i-really-really-mean-it
ceph osd pool delete fs_metadata fs_metadata --yes-i-really-really-mean-it
systemctl start ceph-mds@lab1