##################################
+class CephIscsi(object):
+ """Defines a Ceph-Iscsi container"""
+
+ daemon_type = 'iscsi'
+ entrypoint = '/usr/bin/rbd-target-api'
+
+ required_files = ['iscsi-gateway.cfg']
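+ # Rough sketch of the config-json this daemon type is deployed with: the
+ # 'files' key is read by this class, while 'config' and 'keyring' are handled
+ # by the common deploy path, e.g.
+ #   {
+ #     "config": "<minimal ceph.conf>",
+ #     "keyring": "<keyring for the gateway's client entity>",
+ #     "files": {"iscsi-gateway.cfg": "<rendered gateway config>"}
+ #   }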
+
+ def __init__(self,
+ fsid,
+ daemon_id,
+ config_json,
+ image=DEFAULT_IMAGE):
+ # type: (str, Union[int, str], Dict, str) -> None
+ self.fsid = fsid
+ self.daemon_id = daemon_id
+ self.image = image
+
+ def json_get(key, default=None, require=False):
+ if require and key not in config_json:
+ raise Error('{} missing from config-json'.format(key))
+ return config_json.get(key, default)
+
+ # config-json options
+ self.files = json_get('files', {})
+
+ # validate the supplied args
+ self.validate()
+
+ @classmethod
+ def init(cls, fsid, daemon_id):
+ # type: (str, Union[int, str]) -> CephIscsi
+ return cls(fsid, daemon_id, get_parm(args.config_json), args.image)
+
+ @staticmethod
+ def get_container_mounts(data_dir, log_dir):
+ # type: (str, str) -> Dict[str, str]
+ mounts = dict()
+ mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z'
+ mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z'
+ mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z'
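+ # configfs must be shared with the host so rbd-target-api can manage the kernel LIO target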
+ mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config:z'
+ mounts[log_dir] = '/var/log/rbd-target-api:z'
+ mounts['/dev/log'] = '/dev/log:z'
+ return mounts
+
+ @staticmethod
+ def get_version(container_id):
+ # type: (str) -> Optional[str]
+ version = None
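+ # the ceph-iscsi version is reported by the ceph_iscsi Python package installed in the container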
+ out, err, code = call(
+ [container_path, 'exec', container_id,
+ '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"])
+ if code == 0:
+ version = out.strip()
+ return version
+
+ def validate(self):
+ # type: () -> None
+ if not is_fsid(self.fsid):
+ raise Error('not an fsid: %s' % self.fsid)
+ if not self.daemon_id:
+ raise Error('invalid daemon_id: %s' % self.daemon_id)
+ if not self.image:
+ raise Error('invalid image: %s' % self.image)
+
+ # check for the required files
+ if self.required_files:
+ for fname in self.required_files:
+ if fname not in self.files:
+ raise Error('required file missing from config-json: %s' % fname)
+
+ def get_daemon_name(self):
+ # type: () -> str
+ return '%s.%s' % (self.daemon_type, self.daemon_id)
+
+ def get_container_name(self, desc=None):
+ # type: (Optional[str]) -> str
+ cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name())
+ if desc:
+ cname = '%s-%s' % (cname, desc)
+ return cname
+
+ def get_file_content(self, fname):
+ # type: (str) -> str
+ """Normalize the json file content into a string"""
+ content = self.files.get(fname)
+ if isinstance(content, list):
+ content = '\n'.join(content)
+ return content
+
+ def create_daemon_dirs(self, data_dir, uid, gid):
+ # type: (str, int, int) -> None
+ """Create files under the container data dir"""
+ if not os.path.isdir(data_dir):
+ raise OSError('data_dir is not a directory: %s' % (data_dir))
+
+ logger.info('Creating ceph-iscsi config...')
+ configfs_dir = os.path.join(data_dir, 'configfs')
+ makedirs(configfs_dir, uid, gid, 0o755)
+
+ # populate files from the config-json
+ for fname in self.files:
+ config_file = os.path.join(data_dir, fname)
+ config_content = self.get_file_content(fname)
+ logger.info('Write file: %s' % (config_file))
+ with open(config_file, 'w') as f:
+ os.fchown(f.fileno(), uid, gid)
+ os.fchmod(f.fileno(), 0o600)
+ f.write(config_content)
+
+ @staticmethod
+ def configfs_mount_umount(data_dir, mount=True):
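+ # Returns a shell snippet (as a word list for the unit files) that mounts or
+ # unmounts configfs under the daemon's data dir; the grep guard makes it idempotent.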
+ mount_path = os.path.join(data_dir, 'configfs')
+ if mount:
+ cmd = "if ! grep -qs {0} /proc/mounts; then " \
+ "mount -t configfs none {0}; fi".format(mount_path)
+ else:
+ cmd = "if grep -qs {0} /proc/mounts; then " \
+ "umount {0}; fi".format(mount_path)
+ return cmd.split()
+
+##################################
+
def get_supported_daemons():
supported_daemons = list(Ceph.daemons)
supported_daemons.extend(Monitoring.components)
supported_daemons.append(NFSGanesha.daemon_type)
+ supported_daemons.append(CephIscsi.daemon_type)
assert len(supported_daemons) == len(set(supported_daemons))
return supported_daemons
return r
def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid,
- config=None, keyring=None,
- reconfig=False):
+ config=None, keyring=None, reconfig=False):
# type: (str, str, Union[int, str], int, int, Optional[str], Optional[str], Optional[bool]) -> None
data_dir = make_data_dir(fsid, daemon_type, daemon_id, uid=uid, gid=gid)
make_log_dir(fsid, uid=uid, gid=gid)
nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
nfs_ganesha.create_daemon_dirs(data_dir, uid, gid)
+ if daemon_type == CephIscsi.daemon_type:
+ ceph_iscsi = CephIscsi.init(fsid, daemon_id)
+ ceph_iscsi.create_daemon_dirs(data_dir, uid, gid)
+
def get_parm(option):
# type: (str) -> Dict[str, str]
data_dir = get_data_dir(fsid, daemon_type, daemon_id)
mounts.update(NFSGanesha.get_container_mounts(data_dir))
+ if daemon_type == CephIscsi.daemon_type:
+ assert daemon_id
+ data_dir = get_data_dir(fsid, daemon_type, daemon_id)
+ log_dir = get_log_dir(fsid)
+ mounts.update(CephIscsi.get_container_mounts(data_dir, log_dir))
+
return mounts
def get_container(fsid, daemon_type, daemon_id,
elif daemon_type == NFSGanesha.daemon_type:
entrypoint = NFSGanesha.entrypoint
name = '%s.%s' % (daemon_type, daemon_id)
+ elif daemon_type == CephIscsi.daemon_type:
+ entrypoint = CephIscsi.entrypoint
+ name = '%s.%s' % (daemon_type, daemon_id)
else:
entrypoint = ''
name = ''
nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
prestart = nfs_ganesha.get_rados_grace_container('add')
f.write(' '.join(prestart.run_cmd()) + '\n')
+ elif daemon_type == CephIscsi.daemon_type:
+ f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=True)) + '\n')
if daemon_type in Ceph.daemons:
install_path = find_program('install')
nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
poststop = nfs_ganesha.get_rados_grace_container('remove')
f.write(' '.join(poststop.run_cmd()) + '\n')
+ elif daemon_type == CephIscsi.daemon_type:
+ f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=False)) + '\n')
os.fchmod(f.fileno(), 0o600)
os.rename(data_dir + '/unit.poststop.new',
data_dir + '/unit.poststop')
cmd = ['dashboard', 'ac-user-create', args.initial_dashboard_user, password, 'administrator', '--force-password']
if not args.dashboard_password_noupdate:
cmd.append('--pwd-update-required')
- cli(cmd)
+ cli(cmd)
logger.info('Fetching dashboard port number...')
out = cli(['config', 'get', 'mgr', 'mgr/dashboard/ssl_server_port'])
port = int(out)
deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
config=config, keyring=keyring,
reconfig=args.reconfig)
+ elif daemon_type == CephIscsi.daemon_type:
+ (config, keyring) = get_config_and_keyring()
+ (uid, gid) = extract_uid_gid()
+ c = get_container(args.fsid, daemon_type, daemon_id)
+ deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
+ config=config, keyring=keyring,
+ reconfig=args.reconfig)
else:
raise Error("{} not implemented in command_deploy function".format(daemon_type))
version = seen_versions.get(image_id, None)
if daemon_type == NFSGanesha.daemon_type:
version = NFSGanesha.get_version(container_id)
+ if daemon_type == CephIscsi.daemon_type:
+ version = CephIscsi.get_version(container_id)
elif not version:
if daemon_type in Ceph.daemons:
out, err, code = call(
get_hostname(), args.expect_hostname))
logger.info('Hostname "%s" matches what is expected.',
args.expect_hostname)
-
+
if errors:
raise Error('\n'.join(errors))
'deps': deps,
'last_config': stamp,
}
-
+
def update_last_host_check(self, host):
# type: (str) -> None
self.last_host_check[host] = datetime.datetime.utcnow()
def _get_config_and_keyring(self, daemon_type, daemon_id,
keyring=None,
- extra_config=None):
+ extra_ceph_config=None):
# type: (str, str, Optional[str], Optional[str]) -> Dict[str, Any]
# keyring
if not keyring:
ret, config, err = self.mon_command({
"prefix": "config generate-minimal-conf",
})
- if extra_config:
- config += extra_config
+ if extra_ceph_config:
+ config += extra_ceph_config
return {
'config': config,
osd_uuid_map=None):
if not extra_args:
extra_args = []
+ if not extra_config:
+ extra_config = {}
name = '%s.%s' % (daemon_type, daemon_id)
start_time = datetime.datetime.utcnow()
cephadm_config = self._get_config_and_keyring(
daemon_type, daemon_id,
keyring=keyring,
- extra_config=extra_config)
+ extra_ceph_config=extra_config.pop('config', ''))
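+ # anything left in extra_config (e.g. iscsi-gateway.cfg) is passed to cephadm
+ # as 'files' and written into the daemon's data dir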
+ if extra_config:
+ cephadm_config.update({'files': extra_config})
extra_args.extend(['--config-json', '-'])
# osd deployments needs an --osd-uuid arg
'prometheus': self._create_prometheus,
'node-exporter': self._create_node_exporter,
'crash': self._create_crash,
+ 'iscsi': self._create_iscsi,
}
config_fns = {
'mds': self._config_mds,
'rgw': self._config_rgw,
'nfs': self._config_nfs,
+ 'iscsi': self._config_iscsi,
}
create_func = create_fns.get(daemon_type, None)
if not create_func:
args.append((daemon_id, host, network)) # type: ignore
elif daemon_type == 'nfs':
args.append((daemon_id, host, spec)) # type: ignore
+ elif daemon_type == 'iscsi':
+ args.append((daemon_id, host, spec)) # type: ignore
else:
args.append((daemon_id, host)) # type: ignore
return self._create_daemon('mon', name, host,
keyring=keyring,
- extra_config=extra_config)
+ extra_config={'config': extra_config})
def add_mon(self, spec):
# type: (ServiceSpec) -> orchestrator.Completion
'mgr': PlacementSpec(count=2),
'mds': PlacementSpec(count=2),
'rgw': PlacementSpec(count=2),
+ 'iscsi': PlacementSpec(count=1),
'rbd-mirror': PlacementSpec(count=2),
'nfs': PlacementSpec(count=1),
'grafana': PlacementSpec(count=1),
def apply_rgw(self, spec):
return self._apply(spec)
+ def add_iscsi(self, spec):
+ # type: (ServiceSpec) -> orchestrator.Completion
+ return self._add_daemon('iscsi', spec, self._create_iscsi, self._config_iscsi)
+
+ def _config_iscsi(self, spec):
+ logger.info('Saving service %s spec with placement %s' % (
+ spec.service_name(), spec.placement.pretty_str()))
+ self.spec_store.save(spec)
+
+ def _create_iscsi(self, igw_id, host, spec):
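+ # dedicated auth entity per gateway; rwx on the pool is needed because
+ # ceph-iscsi stores its gateway configuration object in that pool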
+ ret, keyring, err = self.mon_command({
+ 'prefix': 'auth get-or-create',
+ 'entity': utils.name_to_config_section('iscsi') + '.' + igw_id,
+ 'caps': ['mon', 'allow rw',
+ 'osd', f'allow rwx pool={spec.pool}'],
+ })
+
+ api_secure = 'false' if spec.api_secure is None else spec.api_secure
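+ # rendered into /etc/ceph/iscsi-gateway.cfg inside the container (see
+ # CephIscsi.get_container_mounts); unset optional fields are written as empty values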
+ igw_conf = f"""
+# generated by cephadm
+[config]
+cluster_client_name = {utils.name_to_config_section('iscsi')}.{igw_id}
+pool = {spec.pool}
+trusted_ip_list = {spec.trusted_ip_list or ''}
+minimum_gateways = 1
+fqdn_enabled = {spec.fqdn_enabled or ''}
+api_port = {spec.api_port or ''}
+api_user = {spec.api_user or ''}
+api_password = {spec.api_password or ''}
+api_secure = {api_secure}
+"""
+ extra_config = {'iscsi-gateway.cfg': igw_conf}
+ return self._create_daemon('iscsi', igw_id, host, keyring=keyring,
+ extra_config=extra_config)
+
+ @trivial_completion
+ def apply_iscsi(self, spec):
+ return self._apply(spec)
+
def add_rbd_mirror(self, spec):
return self._add_daemon('rbd-mirror', spec, self._create_rbd_mirror)
pass
from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
- NFSServiceSpec
+ NFSServiceSpec, IscsiServiceSpec
from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
HostSpec, OrchestratorError
from tests import mock
[out] = wait(cephadm_module, c)
match_glob(out, "Deployed nfs.name.* on host 'test'")
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+ def test_iscsi(self, cephadm_module):
+ with self._with_host(cephadm_module, 'test'):
+ ps = PlacementSpec(hosts=['test'], count=1)
+ spec = IscsiServiceSpec('name', pool='pool', placement=ps)
+ c = cephadm_module.add_iscsi(spec)
+ [out] = wait(cephadm_module, c)
+ match_glob(out, "Deployed iscsi.name.* on host 'test'")
+
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_prometheus(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
assert wait(cephadm_module, c) == 'Scheduled nfs update...'
assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
+ @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+ def test_apply_iscsi_save(self, cephadm_module):
+ with self._with_host(cephadm_module, 'test'):
+ ps = PlacementSpec(hosts=['test'], count=1)
+ spec = IscsiServiceSpec('name', pool='pool', placement=ps)
+ c = cephadm_module.apply_iscsi(spec)
+ assert wait(cephadm_module, c) == 'Scheduled iscsi update...'
+ assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
+
@mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
def test_apply_prometheus_save(self, cephadm_module):
with self._with_host(cephadm_module, 'test'):
Map from daemon names to ceph entity names (as seen in config)
"""
daemon_type = name.split('.', 1)[0]
- if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash']:
+ if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi']:
return 'client.' + name
elif daemon_type in ['mon', 'osd', 'mds', 'mgr', 'client']:
return name
from ceph.deployment import inventory
from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, \
- ServiceSpecValidationError
+ ServiceSpecValidationError, IscsiServiceSpec
from ceph.deployment.drive_group import DriveGroupSpec
from mgr_module import MgrModule, CLICommand, HandleCommandResult
'alertmanager': self.apply_alertmanager,
'crash': self.apply_crash,
'grafana': self.apply_grafana,
+ 'iscsi': cast(Callable[[ServiceSpec], Completion], self.apply_iscsi),
'mds': self.apply_mds,
'mgr': self.apply_mgr,
'mon': self.apply_mon,
"""Update NFS cluster"""
raise NotImplementedError()
+ def add_iscsi(self, spec):
+ # type: (IscsiServiceSpec) -> Completion
+ """Create iscsi daemon(s)"""
+ raise NotImplementedError()
+
+ def apply_iscsi(self, spec):
+ # type: (IscsiServiceSpec) -> Completion
+ """Update iscsi cluster"""
+ raise NotImplementedError()
+
def add_prometheus(self, spec):
# type: (ServiceSpec) -> Completion
"""Create new prometheus daemon"""
if self.daemon_type == 'rgw':
v = self.daemon_id.split('.')
return '.'.join(v[0:2])
- if self.daemon_type in ['mds', 'nfs']:
+ if self.daemon_type in ['mds', 'nfs', 'iscsi']:
return self.daemon_id.split('.')[0]
return self.daemon_type
def service_name(self):
- if self.daemon_type in ['rgw', 'mds', 'nfs']:
+ if self.daemon_type in ['rgw', 'mds', 'nfs', 'iscsi']:
return f'{self.daemon_type}.{self.service_id()}'
return self.daemon_type
raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
- ServiceDescription
+ ServiceDescription, IscsiServiceSpec
def nice_delta(now, t, suffix=''):
if t:
completion = self.add_node_exporter(spec)
elif daemon_type == 'prometheus':
completion = self.add_prometheus(spec)
+ elif daemon_type == 'iscsi':
+ completion = self.add_iscsi(spec)
else:
raise OrchestratorValidationError(f'unknown daemon type `{daemon_type}`')
raise_if_exception(completion)
return HandleCommandResult(stdout=completion.result_str())
+ @_cli_write_command(
+ 'orch daemon add iscsi',
+ 'name=pool,type=CephString '
+ 'name=fqdn_enabled,type=CephString,req=false '
+ 'name=trusted_ip_list,type=CephString,req=false '
+ 'name=placement,type=CephString,req=false',
+ 'Start iscsi daemon(s)')
+ def _iscsi_add(self, pool, fqdn_enabled=None, trusted_ip_list=None, placement=None, inbuf=None):
+ usage = """
+ Usage:
+ ceph orch daemon add iscsi -i <json_file>
+ ceph orch daemon add iscsi <pool>
+ """
+ if inbuf:
+ try:
+ iscsi_spec = IscsiServiceSpec.from_json(json.loads(inbuf))
+ except ValueError as e:
+ msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
+ return HandleCommandResult(-errno.EINVAL, stderr=msg)
+ else:
+ iscsi_spec = IscsiServiceSpec(
+ service_id='iscsi',
+ pool=pool,
+ fqdn_enabled=fqdn_enabled,
+ trusted_ip_list=trusted_ip_list,
+ placement=PlacementSpec.from_string(placement),
+ )
+
+ completion = self.add_iscsi(iscsi_spec)
+ self._orchestrator_wait([completion])
+ raise_if_exception(completion)
+ return HandleCommandResult(stdout=completion.result_str())
+
@_cli_write_command(
'orch daemon add nfs',
"name=svc_arg,type=CephString "
start the services.
"""
- KNOWN_SERVICE_TYPES = 'alertmanager crash grafana mds mgr mon nfs ' \
+ KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi mds mgr mon nfs ' \
'node-exporter osd prometheus rbd-mirror rgw'.split()
@classmethod
ret = {
'rgw': RGWSpec,
'nfs': NFSServiceSpec,
- 'osd': DriveGroupSpec
+ 'osd': DriveGroupSpec,
+ 'iscsi': IscsiServiceSpec,
}.get(service_type, cls)
if ret == ServiceSpec and not service_type:
raise ServiceSpecValidationError('Spec needs a "service_type" key.')
# This must not be a method of ServiceSpec, otherwise you'll hunt
# sub-interpreter affinity bugs.
ServiceSpec.validate(self)
- if self.service_type in ['mds', 'rgw', 'nfs'] and not self.service_id:
+ if self.service_type in ['mds', 'rgw', 'nfs', 'iscsi'] and not self.service_id:
raise ServiceSpecValidationError('Cannot add Service: id required')
return 443
else:
return 80
+
+
+class IscsiServiceSpec(ServiceSpec):
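+ """Settings used by cephadm to render iscsi-gateway.cfg for a ceph-iscsi gateway (rbd-target-api)."""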
+ def __init__(self, service_id, pool=None,
+ placement=None,
+ trusted_ip_list=None,
+ fqdn_enabled=None,
+ api_port=None,
+ api_user=None,
+ api_password=None,
+ api_secure=None,
+ ssl_cert=None,
+ ssl_key=None,
+ service_type='iscsi',
+ unmanaged=False):
+ assert service_type == 'iscsi'
+ super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id,
+ placement=placement, unmanaged=unmanaged)
+
+ #: RADOS pool where ceph-iscsi config data is stored.
+ self.pool = pool
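+ #: Comma-separated list of IPs allowed to access the gateway REST API (trusted_ip_list in iscsi-gateway.cfg).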
+ self.trusted_ip_list = trusted_ip_list
+ self.fqdn_enabled = fqdn_enabled
+ self.api_port = api_port
+ self.api_user = api_user
+ self.api_password = api_password
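+ #: Serve the REST API over TLS (api_secure in iscsi-gateway.cfg).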
+ self.api_secure = api_secure
+ self.ssl_cert = ssl_cert
+ self.ssl_key = ssl_key
+
+ def validate_add(self):
+ servicespec_validate_add(self)
+
+ if not self.pool:
+ raise ServiceSpecValidationError(
+ 'Cannot add ISCSI: No Pool specified')