ceph orchestrator set backend rook
+You can then check that the backend is properly configured:
+
+::
+
+ ceph orchestrator status
+
+
Usage
=====
.. automethod:: Orchestrator.upgrade_status
.. autoclass:: UpgradeSpec
.. autoclass:: UpgradeStatusSpec
+
+Utility
+-------
+
+.. automethod:: Orchestrator.available
+
"""
Enable other modules to interrogate this module to discover
whether it's usable as an orchestrator module.
+
+ Subclasses do not need to override this.
"""
return True
+ def available(self):
+ """
+ Report whether we can talk to the orchestrator. This is the
+ place to give the user a meaningful message if the orchestrator
+ isn't running or can't be contacted.
+
+ This method may be called frequently (e.g. every page load
+ to conditionally display a warning banner), so make sure it's
+ not too expensive. It's okay to give a slightly stale status
+ (e.g. based on a periodic background ping of the orchestrator)
+ if that's necessary to make this method fast.
+
+ Do not override this method if you don't have a meaningful
+ status to return: the default None, None return value is used
+ to indicate that a module is unable to indicate its availability.
+
+ @return two-tuple of boolean, string
+ """
+ # Base-class default: (None, None) means "this backend does not
+ # report availability"; callers must treat it as status-unknown
+ # rather than unavailable.
+ return None, None
+
def wait(self, completions):
"""
Given a list of Completion instances, progress any which are
"desc": "Select orchestrator module backend",
"perm": "rw"
},
+ {
+ "cmd": "orchestrator status",
+ "desc": "Report configured backend and its status",
+ "perm": "r"
+ }
]
def _select_orchestrator(self):
module_name
)
+ def _status(self):
+ """
+ Handle the ``orchestrator status`` command: report which backend
+ is configured and, when the backend implements ``available()``,
+ whether it can currently be reached (with the backend's reason
+ string when it cannot).
+
+ :return: three-tuple of (return code, stdout, stderr); retval 0
+ in all cases, including when no backend is configured.
+ """
+ try:
+ avail, why = self._oremote("available")
+ except NoOrchestrator:
+ return 0, "No orchestrator configured (try " \
+ "`ceph orchestrator set backend`)", ""
+
+ if avail is None:
+ # The module does not report its availability
+ return 0, "Backend: {0}".format(
+ self._select_orchestrator()), ""
+ else:
+ return 0, "Backend: {0}\nAvailable: {1}{2}".format(
+ self._select_orchestrator(),
+ avail,
+ " ({0})".format(why) if not avail else ""
+ ), ""
+
def handle_command(self, inbuf, cmd):
try:
return self._handle_command(inbuf, cmd)
return self._service_status(cmd)
elif cmd['prefix'] == "orchestrator service add":
return self._service_add(cmd)
- if cmd['prefix'] == "orchestrator set backend":
+ elif cmd['prefix'] == "orchestrator set backend":
return self._set_backend(cmd)
+ elif cmd['prefix'] == "orchestrator status":
+ return self._status()
else:
raise NotImplementedError()
try:
from kubernetes import client, config
+ from kubernetes.client.rest import ApiException
kubernetes_imported = True
except ImportError:
if kubernetes_imported:
return True, ""
else:
- return False, "kubernetes module not found"
+ return False, "Kubernetes module not found"
+
+ def available(self):
+ """
+ Report whether the Rook backend is usable: the kubernetes client
+ library must be importable, we must be running inside the Rook
+ cluster, and the Kubernetes API must answer a simple query.
+
+ :return: two-tuple of (bool, str); the string is a human-readable
+ reason when the backend is unavailable, "" otherwise.
+ """
+ if not kubernetes_imported:
+ return False, "Kubernetes module not found"
+ elif not self._in_cluster():
+ return False, "ceph-mgr not running in Rook cluster"
+
+ # NOTE(review): this issues a live pod-list API call on every
+ # invocation; the base-class contract allows available() to be
+ # called frequently, so consider a cached/background ping if this
+ # proves expensive — TODO confirm.
+ try:
+ self.k8s.list_namespaced_pod(self.rook_cluster.cluster_name)
+ except ApiException:
+ return False, "Cannot reach Kubernetes API"
+ else:
+ return True, ""
def __init__(self, *args, **kwargs):
super(RookOrchestrator, self).__init__(*args, **kwargs)
self._initialized.wait()
return self._rook_cluster
+ def _in_cluster(self):
+ """
+ Check if we appear to be running inside a Kubernetes/Rook
+ cluster
+
+ :return: bool
+ """
+ # ROOK_CLUSTER_NAME is presumably injected into the ceph-mgr
+ # environment by the Rook operator when deployed in-cluster —
+ # TODO confirm against the Rook deployment manifests.
+ return 'ROOK_CLUSTER_NAME' in os.environ
+
def serve(self):
# For deployed clusters, we should always be running inside
# a Rook cluster. For development convenience, also support
# running outside (reading ~/.kube config)
- in_cluster = 'ROOK_CLUSTER_NAME' in os.environ
- if in_cluster:
+
+ if self._in_cluster():
config.load_incluster_config()
cluster_name = os.environ['ROOK_CLUSTER_NAME']
else: