1. Preliminary Work

[root@sllee-ceph01 /]# ceph osd pool create kubernetes 64
pool 'kubernetes' created

[root@sllee-ceph01 /]# rbd pool init kubernetes

[root@sllee-ceph01 /]# ceph auth get-or-create client.kubernetes mon 'profile rbd' osd 'profile rbd pool=kubernetes'
[client.kubernetes]
        key = AQDBlORjNW6aDRAAEdITaRjL42YUdZ3uxmOjgg==
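### Optional sanity check for the new pool and CephX user:

ceph osd pool ls detail | grep kubernetes
ceph auth get client.kubernetes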


2. Checking Ceph Information

[root@sllee-ceph01 /]# ceph -s
  cluster:
    id:     2515bf16-1268-455a-b03b-69328a32fbc5   # note the cluster ID
    health: HEALTH_OK

  services:
    mon: 3 daemons, quorum sllee-ceph01,sllee-ceph02,sllee-ceph03 (age 27h)
    mgr: sllee-ceph02(active, since 27h), standbys: sllee-ceph01, sllee-ceph03
    mds: cephfs:1 {0=sllee-ceph01=up:active} 2 up:standby
    osd: 9 osds: 9 up (since 27h), 9 in (since 27h)

  data:
    pools:   10 pools, 321 pgs
    objects: 23 objects, 2.3 KiB
    usage:   9.1 GiB used, 351 GiB / 360 GiB avail
    pgs:     321 active+clean
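### The cluster ID above goes into the CSI ConfigMaps below; it can also be printed on its own:

ceph fsid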



[root@sllee-ceph01 /]# ceph fs status
cephfs - 0 clients
======
RANK  STATE       MDS          ACTIVITY     DNS    INOS
 0    active  sllee-ceph01  Reqs:    0 /s    10     13
      POOL         TYPE     USED  AVAIL
cephfs_metadata  metadata  1536k   110G
  cephfs_data      data       0    110G
STANDBY MDS
sllee-ceph03
sllee-ceph02
MDS version: ceph version 15.2.17 (8a82819d84cf884bd39c17e3236e0632ac146dc4) octopus (stable)
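### Optionally, confirm the filesystem name and its pools (the fsName used in the StorageClass below):

ceph fs ls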



[root@sllee-ceph01 /]# ceph auth get client.admin
exported keyring for client.admin
[client.admin]
        key = AQCdWuRjWZt4JxAAnxNuJzkXnavYNKK7l6+KYw==     # note the key
        caps mds = "allow *"
        caps mgr = "allow *"
        caps mon = "allow *"
        caps osd = "allow *"
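### If only the key is needed (e.g. for the CSI Secret below), it can be printed without the caps:

ceph auth get-key client.admin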


3. CephFS

[root@sllee-master01 ceph-csi]# tee > deploy/cephfs/kubernetes/csi-config-map.yaml << EOF
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "2515bf16-1268-455a-b03b-69328a32fbc5",
        "monitors": [
          "10.10.0.21:6789",
          "10.10.0.22:6789",
          "10.10.0.23:6789"
        ],
        "cephFS" : {
          "subvolumeGroup": "cephfs"
        }
      }
    ]
metadata:
  name: ceph-csi-config
EOF
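### This ConfigMap was applied at this point; the "AlreadyExists" error from plugin-deploy.sh below confirms it:

kubectl apply -f deploy/cephfs/kubernetes/csi-config-map.yaml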




[root@sllee-master01 ceph-csi]# tee > examples/cephfs/secret.yaml << EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-cephfs-secret
  namespace: default
stringData:
  # Required for statically provisioned volumes
  userID: admin
  userKey: AQCdWuRjWZt4JxAAnxNuJzkXnavYNKK7l6+KYw==

  # Required for dynamically provisioned volumes
  adminID: admin
  adminKey: AQCdWuRjWZt4JxAAnxNuJzkXnavYNKK7l6+KYw==
EOF
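### The Secret must exist before the PVC below can bind; the apply step is not shown in the transcript, but amounts to:

kubectl apply -f examples/cephfs/secret.yaml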




[root@sllee-master01 ceph-csi]# vi examples/cephfs/storageclass.yaml
...
  clusterID: 2515bf16-1268-455a-b03b-69328a32fbc5
...
  fsName: cephfs
...
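### plugin-deploy.sh does not create the StorageClass, so it is created separately (mirroring the RBD section below):

kubectl create -f examples/cephfs/storageclass.yaml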


[root@sllee-master01 ceph-csi]# kubectl apply -f examples/ceph-conf.yaml


[root@sllee-master01 ceph-csi]# cd examples/cephfs/
[root@sllee-master01 cephfs]# ./plugin-deploy.sh
serviceaccount/cephfs-csi-provisioner created
clusterrole.rbac.authorization.k8s.io/cephfs-external-provisioner-runner created
clusterrolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role created
role.rbac.authorization.k8s.io/cephfs-external-provisioner-cfg created
rolebinding.rbac.authorization.k8s.io/cephfs-csi-provisioner-role-cfg created
serviceaccount/cephfs-csi-nodeplugin created
Error from server (AlreadyExists): error when creating "./csi-config-map.yaml": configmaps "ceph-csi-config" already exists
service/csi-cephfsplugin-provisioner created
deployment.apps/csi-cephfsplugin-provisioner created
daemonset.apps/csi-cephfsplugin created
service/csi-metrics-cephfsplugin created



### The pod listing shows the CSI provisioner pods for cephfs and rbd stuck in Pending.
### They are already running on worker1 and worker2, so edit the Deployment and reduce the
### replica count from 3 to 2 (see the scale commands after the listing below).

[root@sllee-master01 cephfs]# kubectl get pod -o wide
NAME                                           READY   STATUS    RESTARTS   AGE     IP             NODE             NOMINATED NODE   READINESS GATES
csi-cephfsplugin-8ggv2                         3/3     Running   0          24m     10.20.0.45     sllee-worker02   <none>           <none>
csi-cephfsplugin-b6svc                         3/3     Running   0          24m     10.20.0.44     sllee-worker01   <none>           <none>
csi-cephfsplugin-provisioner-858dd6bb6-799p9   0/5     Pending   0          2m34s   <none>         <none>           <none>           <none>
csi-cephfsplugin-provisioner-858dd6bb6-97f9s   5/5     Running   0          24m     10.233.93.4    sllee-worker02   <none>           <none>
csi-cephfsplugin-provisioner-858dd6bb6-x9bdx   5/5     Running   0          24m     10.233.108.3   sllee-worker01   <none>           <none>
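### Instead of kubectl edit, the replica count can also be dropped directly
### (assuming the default Deployment names from the ceph-csi manifests):

kubectl scale deployment csi-cephfsplugin-provisioner --replicas=2
kubectl scale deployment csi-rbdplugin-provisioner --replicas=2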



[root@sllee-master01 cephfs]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
csi-cephfs-pvc   Bound    pvc-71a28749-b06a-4247-bd00-68ba8c110d74   1Gi        RWX            csi-cephfs-sc   6s
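### The PVC above was created from the example manifest shipped in examples/cephfs
### (the apply step is not shown in the transcript, but mirrors the RBD section below):

kubectl apply -f pvc.yaml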

[root@sllee-master01 cephfs]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS    REASON   AGE
pvc-71a28749-b06a-4247-bd00-68ba8c110d74   1Gi        RWX            Delete           Bound    default/csi-cephfs-pvc   csi-cephfs-sc            46s
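### On the Ceph side, the volume appears as a subvolume; with the subvolumeGroup from the ConfigMap above:

ceph fs subvolume ls cephfs --group_name cephfs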


4. RBD

[root@sllee-master01 ceph-csi]# tee > deploy/rbd/kubernetes/csi-config-map.yaml << EOF
---
apiVersion: v1
kind: ConfigMap
data:
  config.json: |-
    [
      {
        "clusterID": "2515bf16-1268-455a-b03b-69328a32fbc5",
        "monitors": [
          "10.10.0.21:6789",
          "10.10.0.22:6789",
          "10.10.0.23:6789"
        ]
      }
    ]
metadata:
  name: ceph-csi-rbd-config
EOF
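### Note: the stock ceph-csi RBD manifests mount a ConfigMap named "ceph-csi-config", so renaming it
### to "ceph-csi-rbd-config" as above means updating the references in the plugin manifests to match.
### The ConfigMap is then applied before running plugin-deploy.sh:

kubectl apply -f deploy/rbd/kubernetes/csi-config-map.yaml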



[root@sllee-master01 ceph-csi]# tee > examples/rbd/secret.yaml << EOF
---
apiVersion: v1
kind: Secret
metadata:
  name: csi-rbd-secret
  namespace: default
stringData:
  # Key values correspond to a user name and its key, as defined in the
  # ceph cluster. User ID should have required access to the 'pool'
  # specified in the storage class
  userID: kubernetes
  userKey: AQDBlORjNW6aDRAAEdITaRjL42YUdZ3uxmOjgg==

  # Encryption passphrase
  encryptionPassphrase: test_passphrase
EOF


[root@sllee-master01 ceph-csi]# kubectl apply -f examples/rbd/secret.yaml
secret/csi-rbd-secret configured


[root@sllee-master01 ceph-csi]# vi examples/rbd/storageclass.yaml
   ...
   clusterID: 2515bf16-1268-455a-b03b-69328a32fbc5
   ...
   pool: kubernetes
   ...
   
[root@sllee-master01 ceph-csi]# kubectl create -f examples/rbd/storageclass.yaml
storageclass.storage.k8s.io/csi-rbd-sc created
   
[root@sllee-master01 ceph-csi]# kubectl apply -f examples/ceph-conf.yaml


[root@sllee-master01 ceph-csi]# cd examples/rbd/
[root@sllee-master01 rbd]# ./plugin-deploy.sh
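### The script output is omitted here; once it finishes, the RBD plugin pods can be checked the same way:

kubectl get pod -o wide | grep rbd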



[root@sllee-master01 rbd]# kubectl apply -f pvc.yaml
persistentvolumeclaim/rbd-pvc unchanged

[root@sllee-master01 rbd]# kubectl get pv
NAME                                       CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM                    STORAGECLASS    REASON   AGE
pvc-1fdf70ea-4fe3-4e34-938f-5b68ba151264   1Gi        RWO            Delete           Bound    default/rbd-pvc          csi-rbd-sc               14m
pvc-71a28749-b06a-4247-bd00-68ba8c110d74   1Gi        RWX            Delete           Bound    default/csi-cephfs-pvc   csi-cephfs-sc            55m

[root@sllee-master01 rbd]# kubectl get pvc
NAME             STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS    AGE
csi-cephfs-pvc   Bound    pvc-71a28749-b06a-4247-bd00-68ba8c110d74   1Gi        RWX            csi-cephfs-sc   55m
rbd-pvc          Bound    pvc-1fdf70ea-4fe3-4e34-938f-5b68ba151264   1Gi        RWO            csi-rbd-sc      14m
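### Back on a Ceph node, the provisioned image is visible in the pool (ceph-csi names images with a "csi-vol-" prefix by default):

rbd ls kubernetes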


https://github.com/ceph/ceph-csi
