CEPH

From Alpine Linux
Revision as of 09:56, 7 May 2024 by Larena (talk | contribs) (Created page with "{{Draft|work in progress}} CEPH is a software defined storage platform. There are various methods and tools to deploy CEPH, none of which (to my knowledge, please correct this statement if wrong) work on Alpine. However CEPH has been available in the community repository since Alpine 3.10. Thanks to the maintainer that has done an amazing job, even maintaining multiple versions! So that's why this how-to, I will show how to deploy CEPH monitors, managers and OSDs, via A...")
(diff) ← Older revision | Latest revision (diff) | Newer revision → (diff)
This material is work-in-progress ...

work in progress
(Last edited by Larena on 7 May 2024.)

CEPH is a software-defined storage platform. There are various methods and tools to deploy CEPH, none of which (to my knowledge — please correct this statement if wrong) work on Alpine. However, CEPH has been available in the community repository since Alpine 3.10 — thanks to the maintainer, who has done an amazing job, even maintaining multiple versions! That is the motivation for this how-to: I will show how to deploy CEPH monitors, managers and OSDs manually, via APK. Eventually I'm planning to write an Alpine-specific Ansible playbook.

Installing MONITORs

Bootstrap the first MONITOR

In this example we use 3 hosts as monitor role:

# Deployment parameters — replace the %PLACEHOLDER% values for your site.
# In this example three hosts take the monitor role:
MON_HOST1="mon01"
MON_HOST1_IP="%IP_ADDRESS%"
MON_HOST2="mon02"
MON_HOST2_IP="%IP_ADDRESS%"
MON_HOST3="mon03"
MON_HOST3_IP="%IP_ADDRESS%"
# Cluster-wide unique identifier: generate ONCE, then reuse the same value
# on every node that joins this cluster.
FSID=$(cat /proc/sys/kernel/random/uuid) #store this FSID
echo "$FSID"
CLUSTER_NAME="ceph" # default value if unspecified
HOSTNAME=$(cat /etc/hostname)
PUBLIC_NETWORK="%NETWORK_CIDR%"
MYNET="%NETWORK_CIDR%"
# Major version of the ceph packages in the Alpine community repository.
VERSION=17


# Install the monitor packages (run this on each monitor node):
#apk add ceph$VERSION-mon ceph$VERSION-mon-daemon ceph$VERSION-mon-tools ceph$VERSION-openrc sudo


# Write the cluster configuration read by every Ceph daemon.
# Unescaped $VARS below are filled in by the shell now; \$cluster and \$id
# are escaped so they reach the file literally for Ceph's own metavariable
# expansion at runtime.
cat << EOF > "/etc/ceph/$CLUSTER_NAME.conf"
[global]
# Cluster unique identifier
fsid = $FSID
mon_initial_members = $MON_HOST1
mon_host = $MON_HOST1_IP, $MON_HOST2_IP, $MON_HOST3_IP
mon_allow_pool_delete = true
ms_bind_ipv4 = false
ms_bind_ipv6 = true
public_network = $PUBLIC_NETWORK
# Enable authentication
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
# https://docs.ceph.com/en/latest/rados/configuration/pool-pg-config-ref/#pool-pg-and-crush-config-reference
osd_pool_default_size = 3 # Write an object three times
osd_pool_default_min_size = 2 # Accept an I/O operation to a degraded PG that has two copies of an object
osd_pool_default_pg_num = 128 # total number of OSDs * 100 / osd_pool_default_size. Use nearest power of two.
osd_crush_chooseleaf_type = 1
rgw_data = /var/lib/ceph/radosgw/\$cluster-\$id # literal variables
EOF

# The [mon] section is appended separately; the monitor data path must match
# the directory created with install -d below.
cat << EOF >> "/etc/ceph/$CLUSTER_NAME.conf"
[mon]
mon_data = /var/lib/ceph/mon/\$cluster-$FSID # "cluster" is a literal variable
EOF

# Create the three bootstrap keyrings: the monitor secret, the client.admin
# key, and the OSD-bootstrap key (used later when adding OSDs).
ceph-authtool --create-keyring "/tmp/$CLUSTER_NAME.mon.keyring" --gen-key -n mon. --cap mon 'allow *'
ceph-authtool --create-keyring "/etc/ceph/$CLUSTER_NAME.client.admin.keyring" --gen-key -n client.admin --cap mon 'allow *' --cap osd 'allow *' --cap mds 'allow *' --cap mgr 'allow *'
ceph-authtool --create-keyring "/var/lib/ceph/bootstrap-osd/$CLUSTER_NAME.keyring" --gen-key -n client.bootstrap-osd --cap mon 'profile bootstrap-osd' --cap mgr 'allow r'

# Import admin and bootstrap-osd keys into the monitor keyring.
# NOTE: the path was previously hardcoded as /tmp/ceph.mon.keyring, which
# only worked for the default cluster name — use $CLUSTER_NAME consistently.
ceph-authtool "/tmp/$CLUSTER_NAME.mon.keyring" --import-keyring "/etc/ceph/$CLUSTER_NAME.client.admin.keyring"
ceph-authtool "/tmp/$CLUSTER_NAME.mon.keyring" --import-keyring "/var/lib/ceph/bootstrap-osd/$CLUSTER_NAME.keyring"

# ceph-mon runs as the ceph user and must be able to read the keyring.
chown ceph:ceph "/tmp/$CLUSTER_NAME.mon.keyring"

# Build the initial monitor map listing all three monitors.
monmaptool --create --add "$MON_HOST1" "$MON_HOST1_IP" --add "$MON_HOST2" "$MON_HOST2_IP" --add "$MON_HOST3" "$MON_HOST3_IP" --fsid "$FSID" /tmp/monmap

# Monitor data directory — must match mon_data in the [mon] config section.
install -d -o ceph "/var/lib/ceph/mon/$CLUSTER_NAME-$FSID"

# Initialize this monitor's data store from the monmap and keyring.
sudo -u ceph ceph-mon --cluster "$CLUSTER_NAME" --mkfs -i "$HOSTNAME" --inject-monmap /tmp/monmap --keyring "/tmp/$CLUSTER_NAME.mon.keyring"


Create AWall policies:

# Firewall policy for the monitor daemons (msgr2 on 3300, legacy v1 on 6789).
# $MYNET is expanded by the shell here so the generated JSON contains the
# actual network CIDR; previously it was escaped (\$MYNET), which produced an
# awall variable reference that is never defined and breaks 'awall enable'.
cat << EOF > /etc/awall/optional/ceph-mon.json
{
  "description": "Ceph cluster monitor component",

  "service": {
    "ceph-mon": { "proto": "tcp", "port": [ 3300, 6789 ] }
  },

  "filter": [
    {
      "src": "$MYNET",
      "out": "_fw",
      "service": "ceph-mon",
      "action": "accept"
    },
    {
      "in": "_fw",
      "dest": "$MYNET",
      "service": "ceph-mon",
      "action": "accept"
    }
  ]
}
EOF

# Firewall policy allowing this host to reach OSD daemons (ports 6800-7300).
# $MYNET is expanded by the shell here so the generated JSON contains the
# actual network CIDR; previously it was escaped (\$MYNET), which produced an
# awall variable reference that is never defined and breaks 'awall enable'.
cat << EOF > /etc/awall/optional/ceph-client-osd.json
{
  "description": "Ceph cluster OSD client",

  "service": {
    "ceph-osd": { "proto": "tcp", "port": "6800-7300" }
  },

  "filter": [
    {
      "in": "_fw",
      "dest": "$MYNET",
      "service": "ceph-osd",
      "action": "accept"
    }
  ]
}
EOF
# Activate both firewall policies (-f skips the interactive confirmation).
awall enable ceph-mon
awall enable ceph-client-osd
awall activate -f
# Alpine's ceph OpenRC script is multiplexed by symlink name: the part after
# the first dot selects the daemon type and instance id.
ln -s ceph "/etc/init.d/ceph-mon.$HOSTNAME"
rc-update add "ceph-mon.$HOSTNAME"
openrc

Verify that the first node was bootstrapped correctly by running

ceph -s

Add other MONITOR nodes

Copy /etc/ceph/ceph.conf, /tmp/ceph.mon.keyring, /etc/ceph/ceph.client.admin.keyring and /var/lib/ceph/bootstrap-osd/ceph.keyring from an existing monitor to the new monitor node.

# Create the required directories on the new node and make the copied
# keyring readable by the ceph user.
install -d --owner ceph --group ceph /etc/ceph /var/lib/ceph/bootstrap-osd
install -d -o ceph "/var/lib/ceph/mon/$CLUSTER_NAME-$FSID"
chown ceph "/tmp/$CLUSTER_NAME.mon.keyring"

# Rebuild the same initial monmap (same FSID and member list as the first node).
monmaptool --create --add "$MON_HOST1" "$MON_HOST1_IP" --add "$MON_HOST2" "$MON_HOST2_IP" --add "$MON_HOST3" "$MON_HOST3_IP" --fsid "$FSID" /tmp/monmap

# Initialize this monitor's data store; -i must be this node's hostname.
sudo -u ceph ceph-mon --cluster "$CLUSTER_NAME" --mkfs -i "$HOSTNAME" --inject-monmap /tmp/monmap --keyring "/tmp/$CLUSTER_NAME.mon.keyring"

# Register and start the per-host monitor service (symlink-multiplexed script).
ln -s ceph "/etc/init.d/ceph-mon.$HOSTNAME"
rc-update add "ceph-mon.$HOSTNAME"
openrc

If "ceph -s" returns "mon is allowing insecure global_id reclaim", fix it with

ceph config set mon auth_allow_insecure_global_id_reclaim false