Kubernetes/SAN-Storage

= Ceph =

<source lang="bash">
# Install the Ceph client tools (Ubuntu 20.04)
sudo apt install ceph-common

# Client config file; create ~/.ceph first so the redirection below does not fail
mkdir -p ~/.ceph
cat > ~/.ceph/ceph.conf <<EOF
[global]
mon_host = XXXXXXX
keyring = /home/myuser/.ceph/ceph.client.admin.keyring # requires full absolute path
auth_cluster_required = cephx
auth_service_required = cephx
auth_client_required = cephx
EOF

# Client keyring holding the key for the admin user (paste the real key)
cat > ~/.ceph/ceph.client.admin.keyring <<EOF
[client.admin]
key = XXXXXXXXXXXX==
EOF
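
# Where the placeholder values above come from is an assumption, adjust to your cluster:
# on a cluster/admin node, something like the following prints them:
#   ceph config generate-minimal-conf   # minimal config including mon_host
#   ceph auth get-key client.admin      # the key to paste into the keyring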

# The admin key should not be world-readable
chmod 600 ~/.ceph/ceph.client.admin.keyring

# Test the connection to the cluster
ceph -c ~/.ceph/ceph.conf status
</source>

Example output:

<source lang="text">
$ ceph -c ~/.ceph/ceph.conf status
  cluster:
    id:     aaaaaaaa-bbbb-cccc-dddd-eeeeeeeeeeeee
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum dc1ceph11-2222,dc2ceph21-3333,dc3ceph31-4444 (age 4h)
    mgr: dc1ceph11-2222.dddddd(active, since 51m), standbys: dc2ceph21-3333.eeeeee
    mds: devcephfs:1 {0=devcephfs.dc3ceph31-4444.nmngty=up:active} 2 up:standby
    osd: 20 osds: 19 up (since 4d), 19 in (since 4d)
    rgw: 1 daemon active (admin)

  task status:
    scrub status:
        mds.devcephfs.dc3ceph31-4444.nmngty: idle

  data:
    pools:   22 pools, 449 pgs
    objects: 10.77M objects, 25 TiB
    usage:   54 TiB used, 85 TiB / 139 TiB avail
    pgs:     447 active+clean
             2   active+clean+scrubbing+deep

  io:
    client:   27 MiB/s rd, 5.6 MiB/s wr, 3.88k op/s rd, 191 op/s wr
</source>
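
A quick way to confirm that the client can actually use the cluster as block (SAN-style) storage is to create, map and remove a throw-away RBD image. The pool/image name mypool/testvol is a placeholder, and mapping assumes the rbd kernel module is available on the client:

<source lang="bash">
# Create a small test image (placeholder pool/image names)
rbd -c ~/.ceph/ceph.conf create --size 10G mypool/testvol

# Mapping needs root; rbd prints the device node it created
DEV=$(sudo rbd -c ~/.ceph/ceph.conf map mypool/testvol)
sudo mkfs.ext4 "$DEV"
sudo mount "$DEV" /mnt

# Clean up
sudo umount /mnt
sudo rbd -c ~/.ceph/ceph.conf unmap mypool/testvol
rbd -c ~/.ceph/ceph.conf remove mypool/testvol
</source>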

<source lang="bash">
# Aliases so the per-user config is picked up automatically
alias ceph="ceph -c ~/.ceph/ceph.conf"
alias rbd="rbd -c ~/.ceph/ceph.conf"
</source>
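
With the aliases in place the usual client commands need no extra flags. A short smoke test might look like this (the pool name "kube" is only an example):

<source lang="bash">
# Overall and per-pool usage
ceph df

# List pools, then the RBD images in one of them
ceph osd pool ls
rbd ls kube
</source>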