Building an enterprise data storage server cluster with Ceph

Ceph Storage Cluster

1. Introduction

A storage server with many disks

A Synology network-attached storage (NAS) device

2. Technologies used

3. Deployment model

4. Deploying the Ceph cluster

apt update

apt install -y podman tree screen net-tools git wget bash-completion software-properties-common jq ntpdate iptables-persistent netfilter-persistent lldpd arp-scan ntp
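
Before cephadm is used, every node should be able to resolve the others by hostname. A minimal /etc/hosts sketch, assuming the hostnames used below; the MON addresses match the rest of this guide, while the OSD addresses are illustrative assumptions:

# /etc/hosts on every node (OSD addresses are assumed for illustration)
10.10.1.11  ceph-mon-11
10.10.1.12  ceph-mon-12
10.10.1.13  ceph-mon-13
10.10.1.14  ceph-osd-14
10.10.1.15  ceph-osd-15
10.10.1.16  ceph-osd-16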

4.1. Install cephadm, ceph-common

# On all 3 MON nodes
# Install cephadm
CEPH_RELEASE=19.2.0
curl --silent --remote-name --location https://download.ceph.com/rpm-${CEPH_RELEASE}/el9/noarch/cephadm
chmod +x cephadm
mv cephadm /usr/local/bin/

# Install ceph-common
cephadm add-repo --version 19.2.0
cephadm install ceph-common

# Confirm
ceph -v

4.2. Bootstrap the Ceph cluster

https://www.ibm.com/docs/en/storage-ceph/5?topic=dashboard-ceph-installation-access

# Only on ceph-mon-11
# bootstrap ceph cluster

cephadm bootstrap \
  --allow-fqdn-hostname \
  --mon-ip 10.10.1.11 \
  --dashboard-password-noupdate \
  --initial-dashboard-user admin \
  --initial-dashboard-password adminpwd

# From ceph-mon-11, copy the Ceph SSH key to the other Control Nodes
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-mon-12
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-mon-13

# Add Control Nodes to the Ceph cluster
ceph orch host add ceph-mon-12
ceph orch host add ceph-mon-13

# Label the nodes with _admin
ceph orch host label add ceph-mon-11 _admin
ceph orch host label add ceph-mon-12 _admin
ceph orch host label add ceph-mon-13 _admin

# Sync config to the Control Nodes
for i in 10.10.1.11 10.10.1.12 10.10.1.13 ; do rsync -av /etc/ceph/{ceph.conf,ceph.client.admin.keyring,ceph.pub} root@$i:/etc/ceph/ ; done 

# Confirm host
ceph orch host ls --detail

4.3. Deploy Ceph Monitor Daemon (mon), Ceph Manager Daemon (mgr)

# Label the nodes with mon, mgr
ceph orch host label add ceph-mon-11 mon
ceph orch host label add ceph-mon-12 mon
ceph orch host label add ceph-mon-13 mon

ceph orch host label add ceph-mon-11 mgr
ceph orch host label add ceph-mon-12 mgr
ceph orch host label add ceph-mon-13 mgr

# Confirm 
ceph orch host ls --detail

# Apply configs mon , mgr
ceph orch apply mon --placement="3 ceph-mon-11 ceph-mon-12 ceph-mon-13"
ceph orch apply mgr --placement="3 ceph-mon-11 ceph-mon-12 ceph-mon-13"

# Confirm
ceph -s
ceph orch ls
ceph orch ps 

4.4. Deploy Ceph Object Storage Daemons (osd)

# Only on ceph-mon-11
# Copy SSH Key To OSD Nodes
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-osd-14
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-osd-15
ssh-copy-id -f -i /etc/ceph/ceph.pub root@ceph-osd-16

# Add hosts to cluster 
ceph orch host add ceph-osd-14
ceph orch host add ceph-osd-15
ceph orch host add ceph-osd-16

# Give new nodes labels 
ceph orch host label add ceph-osd-14 osd
ceph orch host label add ceph-osd-15 osd
ceph orch host label add ceph-osd-16 osd

# Confirm hosts
ceph orch host ls --detail

A storage device is considered available if all of the following conditions are met:

The device must have no partitions.

The device must not have any LVM state.

The device must not be mounted.

The device must not contain a file system.

The device must not contain a Ceph BlueStore OSD.

The device must be larger than 5 GB.
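
A disk that was used before (old partition table, LVM metadata, leftover file system) will not show up as available. A hedged sketch of cleaning such a device through the orchestrator, assuming /dev/sdb on ceph-osd-14 is safe to wipe:

# Wipe a previously used device so it becomes available again (destroys all data on it)
ceph orch device zap ceph-osd-14 /dev/sdb --force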

# View all devices on storage nodes
ceph orch device ls --refresh

# Tell Ceph to consume the available and unused storage devices
ceph orch daemon add osd ceph-osd-14:/dev/sdb
ceph orch daemon add osd ceph-osd-14:/dev/sdc
ceph orch daemon add osd ceph-osd-14:/dev/sdd

ceph orch daemon add osd ceph-osd-15:/dev/sdb
ceph orch daemon add osd ceph-osd-15:/dev/sdc
ceph orch daemon add osd ceph-osd-15:/dev/sdd

ceph orch daemon add osd ceph-osd-16:/dev/sdb
ceph orch daemon add osd ceph-osd-16:/dev/sdc
ceph orch daemon add osd ceph-osd-16:/dev/sdd
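
Instead of adding every disk by hand as above, cephadm can also be told to consume any device that passes the availability checks. A sketch of that alternative; note that it keeps creating OSDs on new blank disks automatically:

# Alternative: create OSDs on all available, unused devices across the cluster
ceph orch apply osd --all-available-devices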

5. Ceph admin dashboard - Grafana monitoring dashboard
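
Once the mgr daemons are running, the endpoints exposed by the active mgr and the monitoring containers deployed by cephadm can be found from any admin node, for example:

# Show the service endpoints registered by the active mgr (dashboard, prometheus, ...)
ceph mgr services

# Locate the Grafana container deployed by cephadm
ceph orch ps | grep grafana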

6. Ceph block storage

6.1. Example: create a replica-3 pool and a 100 GB image

ceph osd pool create production 
ceph osd pool application enable production rbd
rbd pool init -p production

rbd create file-share --size 102400 --pool production
rbd ls production
rbd info production/file-share 
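
ceph osd pool create uses the cluster's default replication factor, which is normally 3. A quick sketch to confirm, or set explicitly, the replica-3 layout this example assumes:

# Confirm the replication factor of the pool
ceph osd pool get production size

# Set it explicitly to 3 replicas if needed
ceph osd pool set production size 3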

6.2. Using Ceph block storage on an Ubuntu server
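
The client needs the Ceph userspace tools plus a copy of the cluster's ceph.conf and keyring (taken from a MON node) before the commands below will work. A minimal sketch, assuming Ubuntu's stock repositories:

# On the Ubuntu client: install the Ceph client tools (provides the ceph and rbd commands)
apt update && apt install -y ceph-common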

root@ubuntu-server:~# cat /etc/ceph/ceph.conf
# minimal ceph.conf for cc6b42ba-943b-11ef-9a15-b1833c256ed8
[global]
	fsid = cc6b42ba-943b-11ef-9a15-b1833c256ed8
	mon_host = [v2:10.10.1.11:3300/0,v1:10.10.1.11:6789/0] [v2:10.10.1.12:3300/0,v1:10.10.1.12:6789/0] [v2:10.10.1.13:3300/0,v1:10.10.1.13:6789/0]

root@ubuntu-server:~#
root@ubuntu-server:~# cat /etc/ceph/ceph.keyring 
[client.admin]
	key = AQBdPx5n0agJOxAACDY6rZbpJxFU5Ig7NjdZtw==

root@ubuntu-server:~# 
root@ubuntu-server:~# ceph osd lspools
1 .mgr
4 production
root@ubuntu-server:~# rbd ls -l production
NAME        SIZE     PARENT  FMT  PROT  LOCK
file-share  100 GiB            2            
root@ubuntu-server:~# 

# map the block device
root@ubuntu-server:~# rbd map file-share -p production
/dev/rbd0

# confirm
root@ubuntu-server:~# rbd showmapped
id  pool        namespace  image       snap  device   
0   production             file-share  -     /dev/rbd0
root@ubuntu-server:~#

# format with ext4
root@ubuntu-server:~# mkfs.ext4 /dev/rbd0

# create a mount point and mount the device
mkdir -p /opt/ceph-data
mount /dev/rbd0 /opt/ceph-data
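
The map and mount above do not survive a reboot. A hedged sketch of making them persistent with the rbdmap service shipped in ceph-common plus an fstab entry; paths and options are assumptions to adapt:

# /etc/ceph/rbdmap - image to map at boot, authenticated as client.admin
production/file-share id=admin,keyring=/etc/ceph/ceph.keyring

# /etc/fstab - mount the udev-created device path once it has been mapped
/dev/rbd/production/file-share  /opt/ceph-data  ext4  defaults,noauto,_netdev  0 0

# Enable the service that maps the images listed in /etc/ceph/rbdmap at boot
systemctl enable rbdmap.service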



# umount 
root@ubuntu-server:~# umount /opt/ceph-data/

# unmap
root@ubuntu-server:~# rbd unmap /dev/rbd0

# delete image 
root@ubuntu-server:~# rbd rm file-share -p production

# delete a pool
# Syntax: ceph osd pool delete [Pool Name] [Pool Name] --yes-i-really-really-mean-it (the pool name must be given twice)
root@ubuntu-server:~# ceph osd pool delete production production --yes-i-really-really-mean-it
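
By default the monitors refuse pool deletion even with the confirmation flag. If the command above is rejected, the guard can be lifted temporarily, as sketched below, and re-enabled afterwards:

# Allow pool deletion cluster-wide, delete, then restore the guard
ceph config set mon mon_allow_pool_delete true
ceph osd pool delete production production --yes-i-really-really-mean-it
ceph config set mon mon_allow_pool_delete false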


6.3. Using Ceph block storage on a Windows server

https://docs.ceph.com/en/reef/install/windows-install/

# C:\ProgramData\Ceph\ceph.conf
[global]
    log to stderr = true
    run dir = C:/ProgramData/Ceph/out
    crash dir = C:/ProgramData/Ceph/out
    mon host = 10.10.1.11,10.10.1.12,10.10.1.13

[client]
    keyring = C:/ProgramData/Ceph/ceph.keyring
    log file = C:/ProgramData/Ceph/out/$name.$pid.log
    admin socket = C:/ProgramData/Ceph/out/$name.$pid.asok

# C:\ProgramData\Ceph\ceph.keyring
[client.admin]
    key = AQBdPx5n0agJOxAACDY6rZbpJxFU5Ig7NjdZtw==

# map the block device
rbd.exe map -n client.admin production/file-share