tasks:
+- exec:
+ host.a:
+ - |
+ set -ex
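+ # run ceph commands inside the rook-ceph-tools deployment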
+ toolbox() {
+ kubectl -n rook-ceph exec -it deploy/rook-ceph-tools -- "$@"
+ }
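+ # remember how many OSDs are up, then remove OSD 0 through the orchestrator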
+ orig_num_osd=`toolbox ceph osd stat | cut -f3 -d " "`
+ toolbox ceph orch osd rm 0 --force
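+ # wait until the PV that backed the removed OSD shows up as Released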
+ removed_pv=""
+ while [ "$removed_pv" = "" ]
+ do
+ removed_pv=`kubectl get pv | grep Released | cut -f1 -d " "`
+ sleep 3s
+ done
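+ # look up the device path and host behind the released PV and zap that device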
+ target_path=`kubectl get pv $removed_pv -o jsonpath='{.spec.local.path}'`
+ host=`echo $removed_pv | cut -f1 -d "-"`
+ toolbox ceph orch device zap $host $target_path --force
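+ # wait for the rook-ceph-device-zap job to finish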
+ zap_completion="0"
+ while [ "$zap_completion" = "0" ]
+ do
+ zap_completion=`kubectl get job -n rook-ceph rook-ceph-device-zap -o jsonpath='{.status.succeeded.path}'`
+ sleep 3s
+ done
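+ # clear the claimRef so the PV can be bound again, recreate OSDs on available devices, and remove the zap job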
+ kubectl patch pv $removed_pv -p '{"spec":{"claimRef": null}}'
+ toolbox ceph orch apply osd --all-available-devices
+ kubectl delete job rook-ceph-device-zap -n rook-ceph
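+ # wait for the OSD count to return to its original value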
+ num_osd="0"
+ while [ "$num_osd" != "$orig_num_osd" ]
+ do
+ echo "waiting for osd to come back up"
+ num_osd=`toolbox ceph osd stat | cut -f3 -d " "`
+ sleep 30s
+ done
- rook.shell:
commands:
- ceph orch status
log = logging.getLogger(__name__)
+def path_to_examples(ctx, cluster_name: str) -> str:
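+ """Locate the directory holding Rook's example manifests (the layout moved between Rook releases)."""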
+ for p in ['rook/deploy/examples/', 'rook/cluster/examples/kubernetes/ceph/']:
+ try:
+ ctx.rook[cluster_name].remote.get_file(p + 'operator.yaml')
+ return p
+ except Exception:
+ pass
+ assert False, 'Path to examples not found'
def _kubectl(ctx, config, args, **kwargs):
cluster_name = config.get('cluster', 'ceph')
)
# operator.yaml
operator_yaml = ctx.rook[cluster_name].remote.read_file(
- 'rook/cluster/examples/kubernetes/ceph/operator.yaml'
+ (path_to_examples(ctx, cluster_name) + 'operator.yaml')
)
rook_image = config.get('rook_image')
if rook_image:
log.info('Deploying operator')
_kubectl(ctx, config, [
'create',
- '-f', 'rook/cluster/examples/kubernetes/ceph/crds.yaml',
- '-f', 'rook/cluster/examples/kubernetes/ceph/common.yaml',
+ '-f', (path_to_examples(ctx, cluster_name) + 'crds.yaml'),
+ '-f', (path_to_examples(ctx, cluster_name) + 'common.yaml'),
'-f', 'operator.yaml',
])
# fails sometimes when deleting some of the CRDs... not sure why!
_kubectl(ctx, config, [
'delete',
- '-f', 'rook/cluster/examples/kubernetes/ceph/common.yaml',
+ '-f', (path_to_examples(ctx, cluster_name) + 'common.yaml'),
])
_kubectl(ctx, config, [
'delete',
- '-f', 'rook/cluster/examples/kubernetes/ceph/crds.yaml',
+ '-f', (path_to_examples(ctx, cluster_name) + 'crds.yaml'),
])
ctx.rook[cluster_name].remote.run(args=['rm', '-rf', 'rook', 'operator.yaml'])
if op_job:
try:
_kubectl(ctx, config, [
'create',
- '-f', 'rook/cluster/examples/kubernetes/ceph/toolbox.yaml',
+ '-f', (path_to_examples(ctx, cluster_name) + 'toolbox.yaml'),
])
log.info('Waiting for tools container to start')
finally:
_kubectl(ctx, config, [
'delete',
- '-f', 'rook/cluster/examples/kubernetes/ceph/toolbox.yaml',
+ '-f', (path_to_examples(ctx, cluster_name) + 'toolbox.yaml'),
], check_status=False)
yield
-
@contextlib.contextmanager
def ceph_config_keyring(ctx, config):
# get config and push to hosts