Rook Ceph does not deploy
I seem to have run into a problem deploying Rook Ceph on a k8s cluster running Fedora CoreOS. I hope someone can help me solve it; I have searched Google but cannot find a solution. Below are my cluster YAML and the error. I applied the standard common.yaml and operator.yaml before deploying the custom cluster.yaml. Thanks.
cluster-h2.yaml:
#################################################################################################################
# Define the settings for the rook-ceph cluster with common settings for a production cluster.
# All nodes with available raw devices will be used for the Ceph cluster. At least three nodes are required
# in this example. See the documentation for more details on storage settings available.
# For example, to create the cluster:
# kubectl create -f common.yaml
# kubectl create -f operator.yaml
# kubectl create -f cluster.yaml
#################################################################################################################
apiVersion: ceph.rook.io/v1
kind: CephCluster
metadata:
  name: rook-ceph
  namespace: rook-ceph
spec:
  cephVersion:
    # The container image used to launch the Ceph daemon pods (mon, mgr, osd, mds, rgw).
    # v13 is mimic, v14 is nautilus, and v15 is octopus.
    # RECOMMENDATION: In production, use a specific version tag instead of the general v14 flag, which pulls the latest release and could result in different
    # versions running within the cluster. See tags available at https://hub.docker.com/r/ceph/ceph/tags/.
    # If you want to be more precise, you can always use a timestamp tag such ceph/ceph:v14.2.5-20190917
    # This tag might not contain a new Ceph version, just security fixes from the underlying operating system, which will reduce vulnerabilities
    image: ceph/ceph:v14.2.8
    # Whether to allow unsupported versions of Ceph. Currently mimic and nautilus are supported, with the recommendation to upgrade to nautilus.
    # Octopus is the version allowed when this is set to true.
    # Do not set to true in production.
    allowUnsupported: false
  # The path on the host where configuration files will be persisted. Must be specified.
  # Important: if you reinstall the cluster, make sure you delete this directory from each host or else the mons will fail to start on the new cluster.
  # In Minikube, the '/data' directory is configured to persist across reboots. Use "/data/rook" in Minikube environment.
  dataDirHostPath: /var/lib/rook
  # Whether or not upgrade should continue even if a check fails
  # This means Ceph's status could be degraded and we don't recommend upgrading but you might decide otherwise
  # Use at your OWN risk
  # To understand Rook's upgrade process of Ceph, read https://rook.io/docs/rook/master/ceph-upgrade.html#ceph-version-upgrades
  skipUpgradeChecks: false
  # Whether or not continue if PGs are not clean during an upgrade
  continueUpgradeAfterChecksEvenIfNotHealthy: false
  # set the amount of mons to be started
  mon:
    count: 3
    allowMultiplePerNode: false
  # mgr:
  #   modules:
  #   # Several modules should not need to be included in this list. The "dashboard" and "monitoring" modules
  #   # are already enabled by other settings in the cluster CR and the "rook" module is always enabled.
  #   - name: pg_autoscaler
  #     enabled: true
  # enable the ceph dashboard for viewing cluster status
  dashboard:
    enabled: true
    # serve the dashboard under a subpath (useful when you are accessing the dashboard via a reverse proxy)
    # urlPrefix: /ceph-dashboard
    # serve the dashboard at the given port.
    port: 8080
    # serve the dashboard using SSL
    ssl: false
  # enable prometheus alerting for cluster
  monitoring:
    # requires Prometheus to be pre-installed
    enabled: false
    # namespace to deploy prometheusRule in. If empty, namespace of the cluster will be used.
    # Recommended:
    # If you have a single rook-ceph cluster, set the rulesNamespace to the same namespace as the cluster or keep it empty.
    # If you have multiple rook-ceph clusters in the same k8s cluster, choose the same namespace (ideally, namespace with prometheus
    # deployed) to set rulesNamespace for all the clusters. Otherwise, you will get duplicate alerts with multiple alert definitions.
    rulesNamespace: rook-ceph
  network:
    # toggle to use hostNetwork
    hostNetwork: false
  rbdMirroring:
    # The number of daemons that will perform the rbd mirroring.
    # rbd mirroring must be configured with "rbd mirror" from the rook toolbox.
    workers: 0
  # enable the crash collector for ceph daemon crash collection
  crashCollector:
    disable: false
  # To control where various services will be scheduled by kubernetes, use the placement configuration sections below.
  # The example under 'all' would have all services scheduled on kubernetes nodes labeled with 'role=storage-node' and
  # tolerate taints with a key of 'storage-node'.
  placement:
    mgr:
      nodeAffinity:
        requiredDuringSchedulingIgnoredDuringExecution:
          nodeSelectorTerms:
          - matchExpressions:
            - key: "node-role.kubernetes.io/master"
              operator: In
              values:
              - "true"
    # all:
    #   nodeAffinity:
    #     requiredDuringSchedulingIgnoredDuringExecution:
    #       nodeSelectorTerms:
    #       - matchExpressions:
    #         - key: role
    #           operator: In
    #           values:
    #           - storage-node
    #   podAffinity:
    #   podAntiAffinity:
    #   tolerations:
    #   - key: storage-node
    #     operator: Exists
    # The above placement information can also be specified for mon, osd, and mgr components
    # mon:
    #   Monitor deployments may contain an anti-affinity rule for avoiding monitor
    #   collocation on the same node. This is a required rule when host network is used
    #   or when AllowMultiplePerNode is false. Otherwise this anti-affinity rule is a
    #   preferred rule with weight: 50.
    # osd:
    # mgr:
  annotations:
    # all:
    # mon:
    # osd:
    # If no mgr annotations are set, prometheus scrape annotations will be set by default.
    # mgr:
  resources:
    # The requests and limits set here, allow the mgr pod to use half of one CPU core and 1 gigabyte of memory
    # mgr:
    #   limits:
    #     cpu: "500m"
    #     memory: "1024Mi"
    #   requests:
    #     cpu: "500m"
    #     memory: "1024Mi"
    # The above example requests/limits can also be added to the mon and osd components
    # mon:
    # osd:
    # prepareosd:
    # crashcollector:
  # The option to automatically remove OSDs that are out and are safe to destroy.
  removeOSDsIfOutAndSafeToRemove: false
  # priorityClassNames:
  #   all: rook-ceph-default-priority-class
  #   mon: rook-ceph-mon-priority-class
  #   osd: rook-ceph-osd-priority-class
  #   mgr: rook-ceph-mgr-priority-class
  storage: # cluster level storage configuration and selection
    useAllNodes: true
    useAllDevices: false
    deviceFilter: "sda"
    config:
      # The default and recommended storeType is dynamically set to bluestore for devices and filestore for directories.
      # Set the storeType explicitly only if it is required not to use the default.
      # storeType: bluestore
      # metadataDevice: "md0" # specify a non-rotational storage so ceph-volume will use it as block db device of bluestore.
      # databaseSizeMB: "1024" # uncomment if the disks are smaller than 100 GB
      # journalSizeMB: "1024" # uncomment if the disks are 20 GB or smaller
      osdsPerDevice: "1" # this value can be overridden at the node or device level
      # encryptedDevice: "true" # the default value for this option is "false"
    # Cluster level list of directories to use for filestore-based OSD storage. If uncommented, this example would create an OSD under the dataDirHostPath.
    # directories:
    # - path: /var/lib/rook
    # Individual nodes and their config can be specified as well, but 'useAllNodes' above must be set to false. Then, only the named
    # nodes below will be used as storage resources. Each node's 'name' field should match their 'kubernetes.io/hostname' label.
    # nodes:
    # - name: "172.17.4.101"
    #   directories: # specific directories to use for storage can be specified for each node
    #   - path: "/rook/storage-dir"
    #   resources:
    #     limits:
    #       cpu: "500m"
    #       memory: "1024Mi"
    #     requests:
    #       cpu: "500m"
    #       memory: "1024Mi"
    # - name: "172.17.4.201"
    #   devices: # specific devices to use for storage can be specified for each node
    #   - name: "sdb"
    #   - name: "nvme01" # multiple osds can be created on high performance devices
    #     config:
    #       osdsPerDevice: "5"
    #   config: # configuration can be specified at the node level which overrides the cluster level config
    #     storeType: filestore
    # - name: "172.17.4.301"
    #   deviceFilter: "^sd."
  # The section for configuring management of daemon disruptions during upgrade or fencing.
  disruptionManagement:
    # If true, the operator will create and manage PodDisruptionBudgets for OSD, Mon, RGW, and MDS daemons. OSD PDBs are managed dynamically
    # via the strategy outlined in the [design](https://github.com/rook/rook/blob/master/design/ceph/ceph-managed-disruptionbudgets.md). The operator will
    # block eviction of OSDs by default and unblock them safely when drains are detected.
    managePodBudgets: false
    # A duration in minutes that determines how long an entire failureDomain like `region/zone/host` will be held in `noout` (in addition to the
    # default DOWN/OUT interval) when it is draining. This is only relevant when `managePodBudgets` is `true`. The default value is `30` minutes.
    osdMaintenanceTimeout: 30
    # If true, the operator will create and manage MachineDisruptionBudgets to ensure OSDs are only fenced when the cluster is healthy.
    # Only available on OpenShift.
    manageMachineDisruptionBudgets: false
    # Namespace in which to watch for the MachineDisruptionBudgets.
    machineDisruptionBudgetNamespace: openshift-machine-api
kubectl logs -f rook-ceph-mon-a-5cf78848c7-jthsn -n rook-ceph -c chown-container-data-dir:
chown: changing ownership of '/var/log/ceph': Permission denied
chown: changing ownership of '/var/lib/ceph/crash': Permission denied
chown: changing ownership of '/var/lib/ceph/mon/ceph-a': Permission denied
failed to change ownership of '/var/log/ceph' from root:root to ceph:ceph
failed to change ownership of '/var/lib/ceph/crash' from root:root to ceph:ceph
failed to change ownership of '/var/lib/ceph/mon/ceph-a' from root:root to ceph:ceph
kubectl -n rook-ceph get pod:
NAME READY STATUS RESTARTS AGE
csi-cephfsplugin-5dkbv 3/3 Running 0 58m
csi-cephfsplugin-87tcm 3/3 Running 0 58m
csi-cephfsplugin-lt2dj 3/3 Running 0 58m
csi-cephfsplugin-provisioner-7b8fbf88b4-dwffp 4/4 Running 0 58m
csi-cephfsplugin-provisioner-7b8fbf88b4-l7q8d 4/4 Running 0 58m
csi-rbdplugin-5rqfp 3/3 Running 0 58m
csi-rbdplugin-l99zm 3/3 Running 0 58m
csi-rbdplugin-provisioner-6b8b4d558c-4gp8d 5/5 Running 0 58m
csi-rbdplugin-provisioner-6b8b4d558c-wl64n 5/5 Running 0 58m
csi-rbdplugin-z2rqp 3/3 Running 0 58m
rook-ceph-crashcollector-h2-worker-01.internal.djcminuz.cosp9mf 0/1 Init:0/2 0 24m
rook-ceph-crashcollector-h2-worker-02.internal.djcminuz.col6b4d 0/1 Init:0/2 0 55m
rook-ceph-mon-a-5cf78848c7-jthsn 0/1 Init:CrashLoopBackOff 15 55m
rook-ceph-mon-d-7676c977b9-bkv2k 0/1 Init:CrashLoopBackOff 9 24m
rook-ceph-operator-7dcd87699d-jq7jn 1/1 Running 0 66m
rook-discover-bslvz 1/1 Running 0 66m
rook-discover-jls6q 1/1 Running 0 66m
rook-discover-prbn2 1/1 Running 0 66m
1 Answer
I modified operator.yaml from:
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
  value: "false"
to:
- name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
  value: "true"
with the help of the Rook Slack chat group.
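For context, that env var lives in the env list of the rook-ceph-operator Deployment inside the standard operator.yaml. The sketch below is only a rough outline of where the change lands; the surrounding fields are abbreviated and assumed from the Rook v1.3-era manifests and may differ slightly between releases. Only the ROOK_HOSTPATH_REQUIRES_PRIVILEGED value itself is the actual change:

apiVersion: apps/v1
kind: Deployment
metadata:
  name: rook-ceph-operator
  namespace: rook-ceph
spec:
  template:
    spec:
      containers:
      - name: rook-ceph-operator
        env:
        # Assumption: on SELinux-enforcing hosts such as Fedora CoreOS, the
        # chown-container-data-dir init container cannot change ownership of
        # the hostPath directories unless pods mounting host paths run
        # privileged, which is what this setting enables.
        - name: ROOK_HOSTPATH_REQUIRES_PRIVILEGED
          value: "true"

After re-applying the manifest (kubectl apply -f operator.yaml), the operator should reconcile the mon deployments; if the mon pods stay in Init:CrashLoopBackOff, deleting them forces recreation with the privileged init containers.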