Add StorageClass and filesystem for Rook CephFS
commit 9fed13b09c
parent b5967c57af
k8s/rook/filesystem.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
---
apiVersion: ceph.rook.io/v1
kind: CephFilesystem
metadata:
  name: rookfs
  namespace: rook-ceph
spec:
  metadataPool:
    replicated:
      size: 3
  dataPools:
    - name: replicated
      replicated:
        size: 3
  preserveFilesystemOnDelete: true
  metadataServer:
    activeCount: 1
    activeStandby: true
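Note: Rook derives CephFS data pool names from the filesystem name plus the dataPools entry name, so the manifest above should create a Ceph pool named rookfs-replicated; the StorageClass below points at that pool through its pool parameter.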
k8s/rook/storageclass.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: rook-cephfs
# Change "rook-ceph" provisioner prefix to match the operator namespace if needed
provisioner: rook-ceph.cephfs.csi.ceph.com
parameters:
  # clusterID is the namespace where the rook cluster is running
  # If you change this namespace, also change the namespace below where the secret namespaces are defined
  clusterID: rook-ceph

  # CephFS filesystem name into which the volume shall be created
  fsName: rookfs

  # Ceph pool into which the volume shall be created
  # Required for provisionVolume: "true"
  pool: rookfs-replicated

  # The secrets contain Ceph admin credentials. These are generated automatically by the operator
  # in the same namespace as the cluster.
  csi.storage.k8s.io/provisioner-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/provisioner-secret-namespace: rook-ceph
  csi.storage.k8s.io/controller-expand-secret-name: rook-csi-cephfs-provisioner
  csi.storage.k8s.io/controller-expand-secret-namespace: rook-ceph
  csi.storage.k8s.io/node-stage-secret-name: rook-csi-cephfs-node
  csi.storage.k8s.io/node-stage-secret-namespace: rook-ceph

reclaimPolicy: Delete
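For reference, a claim consuming this class could look like the minimal sketch below; the PVC name "cephfs-pvc" and the 1Gi request are illustrative assumptions, not part of this commit. CephFS volumes support ReadWriteMany, which is the usual reason to choose this provisioner over RBD.

---
# Illustrative PVC only; name and size are placeholders, not from this commit
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: cephfs-pvc
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 1Gi
  storageClassName: rook-cephfs

Note that reclaimPolicy: Delete removes the backing subvolume when such a claim is deleted, and online expansion of these claims would additionally require allowVolumeExpansion: true on the StorageClass.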