cephadm: ceph-iscsi first draft 34053/head
author    Matthew Oliver <moliver@suse.com>  Thu, 5 Mar 2020 02:55:47 +0000 (13:55 +1100)
committer Matthew Oliver <moliver@suse.com>  Thu, 2 Apr 2020 23:31:26 +0000 (23:31 +0000)
This is the first draft of ceph-iscsi support in cephadm.
There are a few gotchas when running `rbd-target-api` in a container:

 1. We need both ceph.conf and iscsi-gateway.cfg, so we need the ability to
pass extra config. The latter is generated from the spec, so the daemon
config func API now allows returning a dict of configs:

  { 'config': '<str>',       # appended to the ceph.conf
    '<conf name>': '<str>',  # dumped to datadir/<conf name>
    ...
  }

It is up to cephadm to bind mount each extra config file to the right
location. The 'config' key isn't used by this patch, but it lets specs or
config funcs append arbitrary ceph.conf content; that may be overkill.
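
For illustration, a minimal sketch of a config func returning the extended
dict. The function name and arguments here are hypothetical; in the patch
the iscsi values are built from the service spec:

  def example_iscsi_config(igw_id, pool):
      igw_conf = '\n'.join([
          '# generated by cephadm',
          '[config]',
          'cluster_client_name = client.iscsi.%s' % igw_id,
          'pool = %s' % pool,
          'api_port = 5000',
      ])
      return {
          'config': '',                   # extra ceph.conf lines (unused here)
          'iscsi-gateway.cfg': igw_conf,  # dumped to <datadir>/iscsi-gateway.cfg
      }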

 2. We need the kernel's configfs inside the container so we can configure
LIO. There is a chicken-and-egg problem: configfs isn't mounted on the host,
so there is nothing to bind mount when the container starts. A mount check is
therefore added to the `unit.run` script, with a matching cleanup in
`unit.poststop`, for daemon_type iscsi.
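
Roughly, the generated scripts gain a conditional mount/umount. This is a
sketch of what the helper added in this patch emits (paths simplified):

  def configfs_cmd(data_dir, mount=True):
      # mirrors CephIscsi.configfs_mount_umount further down in the diff
      path = data_dir + '/configfs'
      if mount:
          return ('if ! grep -qs {0} /proc/mounts; then '
                  'mount -t configfs none {0}; fi'.format(path))
      return ('if grep -qs {0} /proc/mounts; then '
              'umount {0}; fi'.format(path))

  # unit.run gets the mount=True command before the container run command;
  # unit.poststop gets the mount=False command during cleanup.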

 3. rbd-target-api is Python and hardcodes a few things, like logging
through `/dev/log`, which happens to be a domain socket. So `/dev/log` also
needs to be bind mounted into the container.
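
To illustrate why (this snippet is illustrative only, not actual
rbd-target-api code): a Python syslog handler bound to /dev/log fails at
construction unless the socket exists inside the container:

  import logging
  import logging.handlers

  log = logging.getLogger('rbd-target-api')
  # raises if the /dev/log socket is missing from the container
  log.addHandler(logging.handlers.SysLogHandler(address='/dev/log'))
  log.warning('gateway message')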

 4. The daemon expects the keyring to be in `/etc/ceph`, so it also needs to
be bind mounted to that specific location.

As it currently stands, this deploys and starts the API on port 5000, so it
seems to be "working", and gateway.conf does exist in the pool. I have yet to
set up an iSCSI device, but will test that next.

The `rbd-target-api` daemon expects the SSL key and cert to have specific
names in the container, so SSL isn't working yet. However, I do have a PR
open against ceph-iscsi to look them up in the mon config-key store[0].

[0] - https://github.com/ceph/ceph-iscsi/pull/173

Signed-off-by: Matthew Oliver <moliver@suse.com>
src/cephadm/cephadm
src/pybind/mgr/cephadm/module.py
src/pybind/mgr/cephadm/tests/test_cephadm.py
src/pybind/mgr/cephadm/utils.py
src/pybind/mgr/orchestrator/_interface.py
src/pybind/mgr/orchestrator/module.py
src/python-common/ceph/deployment/service_spec.py

index 0dcab6c034d5464392e60cacc7503e7bca7831af..872fabbaf8c04fcdf643c06358cec3d004a3020c 100755 (executable)
@@ -327,10 +327,135 @@ class NFSGanesha(object):
 
 ##################################
 
+class CephIscsi(object):
+    """Defines a Ceph-Iscsi container"""
+
+    daemon_type = 'iscsi'
+    entrypoint = '/usr/bin/rbd-target-api'
+
+    required_files = ['iscsi-gateway.cfg']
+
+    def __init__(self,
+                 fsid,
+                 daemon_id,
+                 config_json,
+                 image=DEFAULT_IMAGE):
+        # type: (str, Union[int, str], Dict, str) -> None
+        self.fsid = fsid
+        self.daemon_id = daemon_id
+        self.image = image
+
+        def json_get(key, default=None, require=False):
+            if require and not key in config_json.keys():
+                raise Error('{} missing from config-json'.format(key))
+            return config_json.get(key, default)
+
+        # config-json options
+        self.files = json_get('files', {})
+
+        # validate the supplied args
+        self.validate()
+
+    @classmethod
+    def init(cls, fsid, daemon_id):
+        # type: (str, Union[int, str]) -> CephIscsi
+        return cls(fsid, daemon_id, get_parm(args.config_json), args.image)
+
+    @staticmethod
+    def get_container_mounts(data_dir, log_dir):
+        # type: (str, str) -> Dict[str, str]
+        mounts = dict()
+        mounts[os.path.join(data_dir, 'config')] = '/etc/ceph/ceph.conf:z'
+        mounts[os.path.join(data_dir, 'keyring')] = '/etc/ceph/keyring:z'
+        mounts[os.path.join(data_dir, 'iscsi-gateway.cfg')] = '/etc/ceph/iscsi-gateway.cfg:z'
+        mounts[os.path.join(data_dir, 'configfs')] = '/sys/kernel/config:z'
+        mounts[log_dir] = '/var/log/rbd-target-api:z'
+        mounts['/dev/log'] = '/dev/log:z'
+        return mounts
+
+    @staticmethod
+    def get_version(container_id):
+        # type: (str) -> Optional[str]
+        version = None
+        out, err, code = call(
+            [container_path, 'exec', container_id,
+             '/usr/bin/python3', '-c', "import pkg_resources; print(pkg_resources.require('ceph_iscsi')[0].version)"])
+        if code == 0:
+            version = out
+        return version
+
+    def validate(self):
+        # type: () -> None
+        if not is_fsid(self.fsid):
+            raise Error('not an fsid: %s' % self.fsid)
+        if not self.daemon_id:
+            raise Error('invalid daemon_id: %s' % self.daemon_id)
+        if not self.image:
+            raise Error('invalid image: %s' % self.image)
+
+        # check for the required files
+        if self.required_files:
+            for fname in self.required_files:
+                if fname not in self.files:
+                    raise Error('required file missing from config-json: %s' % fname)
+
+    def get_daemon_name(self):
+        # type: () -> str
+        return '%s.%s' % (self.daemon_type, self.daemon_id)
+
+    def get_container_name(self, desc=None):
+        # type: (Optional[str]) -> str
+        cname = 'ceph-%s-%s' % (self.fsid, self.get_daemon_name())
+        if desc:
+            cname = '%s-%s' % (cname, desc)
+        return cname
+
+    def get_file_content(self, fname):
+        # type: (str) -> str
+        """Normalize the json file content into a string"""
+        content = self.files.get(fname)
+        if isinstance(content, list):
+            content = '\n'.join(content)
+        return content
+
+    def create_daemon_dirs(self, data_dir, uid, gid):
+        # type: (str, int, int) -> None
+        """Create files under the container data dir"""
+        if not os.path.isdir(data_dir):
+            raise OSError('data_dir is not a directory: %s' % (data_dir))
+
+        logger.info('Creating ceph-iscsi config...')
+        configfs_dir = os.path.join(data_dir, 'configfs')
+        makedirs(configfs_dir, uid, gid, 0o755)
+
+        # populate files from the config-json
+        for fname in self.files:
+            config_file = os.path.join(data_dir, fname)
+            config_content = self.get_file_content(fname)
+            logger.info('Write file: %s' % (config_file))
+            with open(config_file, 'w') as f:
+                os.fchown(f.fileno(), uid, gid)
+                os.fchmod(f.fileno(), 0o600)
+                f.write(config_content)
+
+    @staticmethod
+    def configfs_mount_umount(data_dir, mount=True):
+        mount_path = os.path.join(data_dir, 'configfs')
+        if mount:
+            cmd = "if ! grep -qs {0} /proc/mounts; then " \
+                  "mount -t configfs none {0}; fi".format(mount_path)
+        else:
+            cmd = "if grep -qs {0} /proc/mounts; then " \
+                  "umount {0}; fi".format(mount_path)
+        return cmd.split()
+
+##################################
+
 def get_supported_daemons():
     supported_daemons = list(Ceph.daemons)
     supported_daemons.extend(Monitoring.components)
     supported_daemons.append(NFSGanesha.daemon_type)
+    supported_daemons.append(CephIscsi.daemon_type)
     assert len(supported_daemons) == len(set(supported_daemons))
     return supported_daemons
 
@@ -1287,8 +1412,7 @@ def get_daemon_args(fsid, daemon_type, daemon_id):
     return r
 
 def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid,
-                       config=None, keyring=None,
-                       reconfig=False):
+                       config=None, keyring=None, reconfig=False):
     # type: (str, str, Union[int, str], int, int, Optional[str], Optional[str], Optional[bool]) ->  None
     data_dir = make_data_dir(fsid, daemon_type, daemon_id, uid=uid, gid=gid)
     make_log_dir(fsid, uid=uid, gid=gid)
@@ -1349,6 +1473,10 @@ def create_daemon_dirs(fsid, daemon_type, daemon_id, uid, gid,
         nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
         nfs_ganesha.create_daemon_dirs(data_dir, uid, gid)
 
+    if daemon_type == CephIscsi.daemon_type:
+        ceph_iscsi = CephIscsi.init(fsid, daemon_id)
+        ceph_iscsi.create_daemon_dirs(data_dir, uid, gid)
+
 def get_parm(option):
     # type: (str) -> Dict[str, str]
 
@@ -1464,6 +1592,12 @@ def get_container_mounts(fsid, daemon_type, daemon_id,
         data_dir = get_data_dir(fsid, daemon_type, daemon_id)
         mounts.update(NFSGanesha.get_container_mounts(data_dir))
 
+    if daemon_type == CephIscsi.daemon_type:
+        assert daemon_id
+        data_dir = get_data_dir(fsid, daemon_type, daemon_id)
+        log_dir = get_log_dir(fsid)
+        mounts.update(CephIscsi.get_container_mounts(data_dir, log_dir))
+
     return mounts
 
 def get_container(fsid, daemon_type, daemon_id,
@@ -1492,6 +1626,9 @@ def get_container(fsid, daemon_type, daemon_id,
     elif daemon_type == NFSGanesha.daemon_type:
         entrypoint = NFSGanesha.entrypoint
         name = '%s.%s' % (daemon_type, daemon_id)
+    elif daemon_type == CephIscsi.daemon_type:
+        entrypoint = CephIscsi.entrypoint
+        name = '%s.%s' % (daemon_type, daemon_id)
     else:
         entrypoint = ''
         name = ''
@@ -1651,6 +1788,8 @@ def deploy_daemon_units(fsid, uid, gid, daemon_type, daemon_id, c,
             nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
             prestart = nfs_ganesha.get_rados_grace_container('add')
             f.write(' '.join(prestart.run_cmd()) + '\n')
+        elif daemon_type == CephIscsi.daemon_type:
+            f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=True)) + '\n')
 
         # container run command
         f.write(' '.join(c.run_cmd()) + '\n')
@@ -1680,6 +1819,8 @@ def deploy_daemon_units(fsid, uid, gid, daemon_type, daemon_id, c,
             nfs_ganesha = NFSGanesha.init(fsid, daemon_id)
             poststop = nfs_ganesha.get_rados_grace_container('remove')
             f.write(' '.join(poststop.run_cmd()) + '\n')
+        elif daemon_type == CephIscsi.daemon_type:
+            f.write(' '.join(CephIscsi.configfs_mount_umount(data_dir, mount=False)) + '\n')
         os.fchmod(f.fileno(), 0o600)
         os.rename(data_dir + '/unit.poststop.new',
                   data_dir + '/unit.poststop')
@@ -2423,7 +2564,7 @@ def command_bootstrap():
         cmd = ['dashboard', 'ac-user-create', args.initial_dashboard_user, password, 'administrator', '--force-password']
         if not args.dashboard_password_noupdate:
             cmd.append('--pwd-update-required')
-        cli(cmd)   
+        cli(cmd)
         logger.info('Fetching dashboard port number...')
         out = cli(['config', 'get', 'mgr', 'mgr/dashboard/ssl_server_port'])
         port = int(out)
@@ -2529,6 +2670,13 @@ def command_deploy():
         deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
                       config=config, keyring=keyring,
                       reconfig=args.reconfig)
+    elif daemon_type == CephIscsi.daemon_type:
+        (config, keyring) = get_config_and_keyring()
+        (uid, gid) = extract_uid_gid()
+        c = get_container(args.fsid, daemon_type, daemon_id)
+        deploy_daemon(args.fsid, daemon_type, daemon_id, c, uid, gid,
+                      config=config, keyring=keyring,
+                      reconfig=args.reconfig)
     else:
         raise Error("{} not implemented in command_deploy function".format(daemon_type))
 
@@ -2839,6 +2987,8 @@ def list_daemons(detail=True, legacy_dir=None):
                                 version = seen_versions.get(image_id, None)
                             if daemon_type == NFSGanesha.daemon_type:
                                 version = NFSGanesha.get_version(container_id)
+                            if daemon_type == CephIscsi.daemon_type:
+                                version = CephIscsi.get_version(container_id)
                             elif not version:
                                 if daemon_type in Ceph.daemons:
                                     out, err, code = call(
@@ -3360,7 +3510,7 @@ def command_check_host():
                 get_hostname(), args.expect_hostname))
         logger.info('Hostname "%s" matches what is expected.',
                     args.expect_hostname)
-    
+
     if errors:
         raise Error('\n'.join(errors))
 
index 73fc0c895e7cc8593740b4a0f00e592617f58aea..488f2893dc7e6bd54f115725d0ac3ee50cec75f2 100644 (file)
@@ -230,7 +230,7 @@ class HostCache():
             'deps': deps,
             'last_config': stamp,
         }
+
     def update_last_host_check(self, host):
         # type: (str) -> None
         self.last_host_check[host] = datetime.datetime.utcnow()
@@ -2222,7 +2222,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
 
     def _get_config_and_keyring(self, daemon_type, daemon_id,
                                 keyring=None,
-                                extra_config=None):
+                                extra_ceph_config=None):
         # type: (str, str, Optional[str], Optional[str]) -> Dict[str, Any]
         # keyring
         if not keyring:
@@ -2239,8 +2239,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
         ret, config, err = self.mon_command({
             "prefix": "config generate-minimal-conf",
         })
-        if extra_config:
-            config += extra_config
+        if extra_ceph_config:
+            config += extra_ceph_config
 
         return {
             'config': config,
@@ -2254,6 +2254,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
                        osd_uuid_map=None):
         if not extra_args:
             extra_args = []
+        if not extra_config:
+            extra_config = {}
         name = '%s.%s' % (daemon_type, daemon_id)
 
         start_time = datetime.datetime.utcnow()
@@ -2277,7 +2279,9 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
             cephadm_config = self._get_config_and_keyring(
                     daemon_type, daemon_id,
                     keyring=keyring,
-                    extra_config=extra_config)
+                    extra_ceph_config=extra_config.pop('config', ''))
+            if extra_config:
+                cephadm_config.update({'files': extra_config})
             extra_args.extend(['--config-json', '-'])
 
             # osd deployments needs an --osd-uuid arg
@@ -2376,11 +2380,13 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
             'prometheus': self._create_prometheus,
             'node-exporter': self._create_node_exporter,
             'crash': self._create_crash,
+            'iscsi': self._create_iscsi,
         }
         config_fns = {
             'mds': self._config_mds,
             'rgw': self._config_rgw,
             'nfs': self._config_nfs,
+            'iscsi': self._config_iscsi,
         }
         create_func = create_fns.get(daemon_type, None)
         if not create_func:
@@ -2584,6 +2590,8 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
                 args.append((daemon_id, host, network))  # type: ignore
             elif daemon_type == 'nfs':
                 args.append((daemon_id, host, spec)) # type: ignore
+            elif daemon_type == 'iscsi':
+                args.append((daemon_id, host, spec))  # type: ignore
             else:
                 args.append((daemon_id, host))  # type: ignore
 
@@ -2644,7 +2652,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
 
         return self._create_daemon('mon', name, host,
                                    keyring=keyring,
-                                   extra_config=extra_config)
+                                   extra_config={'config': extra_config})
 
     def add_mon(self, spec):
         # type: (ServiceSpec) -> orchestrator.Completion
@@ -2677,6 +2685,7 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
                 'mgr': PlacementSpec(count=2),
                 'mds': PlacementSpec(count=2),
                 'rgw': PlacementSpec(count=2),
+                'iscsi': PlacementSpec(count=1),
                 'rbd-mirror': PlacementSpec(count=2),
                 'nfs': PlacementSpec(count=1),
                 'grafana': PlacementSpec(count=1),
@@ -2782,6 +2791,45 @@ class CephadmOrchestrator(orchestrator.Orchestrator, MgrModule):
     def apply_rgw(self, spec):
         return self._apply(spec)
 
+    def add_iscsi(self, spec):
+        # type: (ServiceSpec) -> orchestrator.Completion
+        return self._add_daemon('iscsi', spec, self._create_iscsi, self._config_iscsi)
+
+    def _config_iscsi(self, spec):
+        logger.info('Saving service %s spec with placement %s' % (
+            spec.service_name(), spec.placement.pretty_str()))
+        self.spec_store.save(spec)
+
+    def _create_iscsi(self, igw_id, host, spec):
+        ret, keyring, err = self.mon_command({
+            'prefix': 'auth get-or-create',
+            'entity': utils.name_to_config_section('iscsi') + '.' + igw_id,
+            'caps': ['mon', 'allow rw',
+                     'osd', f'allow rwx pool={spec.pool}'],
+        })
+
+        api_secure = 'false' if spec.api_secure is None else spec.api_secure
+        igw_conf = f"""
+# generated by cephadm
+[config]
+cluster_client_name = {utils.name_to_config_section('iscsi')}.{igw_id}
+pool = {spec.pool}
+trusted_ip_list = {spec.trusted_ip_list or ''}
+minimum_gateways = 1
+fqdn_enabled = {spec.fqdn_enabled or ''}
+api_port = {spec.api_port or ''}
+api_user = {spec.api_user or ''}
+api_password = {spec.api_password or ''}
+api_secure = {api_secure}
+"""
+        extra_config = {'iscsi-gateway.cfg': igw_conf}
+        return self._create_daemon('iscsi', igw_id, host, keyring=keyring,
+                                   extra_config=extra_config)
+
+    @trivial_completion
+    def apply_iscsi(self, spec):
+        return self._apply(spec)
+
     def add_rbd_mirror(self, spec):
         return self._add_daemon('rbd-mirror', spec, self._create_rbd_mirror)
 
index 49bcafbce4aa9164b62b21b10e8e55e9aedb0c09..a77d25f63a27a56bbee63782ba87c5c7f4170c5e 100644 (file)
@@ -13,7 +13,7 @@ except ImportError:
     pass
 
 from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
-    NFSServiceSpec
+    NFSServiceSpec, IscsiServiceSpec
 from orchestrator import ServiceDescription, DaemonDescription, InventoryHost, \
     HostSpec, OrchestratorError
 from tests import mock
@@ -321,6 +321,15 @@ class TestCephadm(object):
             [out] = wait(cephadm_module, c)
             match_glob(out, "Deployed nfs.name.* on host 'test'")
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    def test_iscsi(self, cephadm_module):
+        with self._with_host(cephadm_module, 'test'):
+            ps = PlacementSpec(hosts=['test'], count=1)
+            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
+            c = cephadm_module.add_iscsi(spec)
+            [out] = wait(cephadm_module, c)
+            match_glob(out, "Deployed iscsi.name.* on host 'test'")
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_prometheus(self, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
@@ -408,6 +417,15 @@ class TestCephadm(object):
             assert wait(cephadm_module, c) == 'Scheduled nfs update...'
             assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
 
+    @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
+    def test_apply_iscsi_save(self, cephadm_module):
+        with self._with_host(cephadm_module, 'test'):
+            ps = PlacementSpec(hosts=['test'], count=1)
+            spec = IscsiServiceSpec('name', pool='pool', placement=ps)
+            c = cephadm_module.apply_iscsi(spec)
+            assert wait(cephadm_module, c) == 'Scheduled iscsi update...'
+            assert [d.spec for d in wait(cephadm_module, cephadm_module.describe_service())] == [spec]
+
     @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
     def test_apply_prometheus_save(self, cephadm_module):
         with self._with_host(cephadm_module, 'test'):
index 3ecbb60d9ea5af46e7711b360e37ceb84a4dcf9f..3cc6d6e1a74ab17fe5e422f7fec6a61dc3c52514 100644 (file)
@@ -7,7 +7,7 @@ def name_to_config_section(name):
     Map from daemon names to ceph entity names (as seen in config)
     """
     daemon_type = name.split('.', 1)[0]
-    if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash']:
+    if daemon_type in ['rgw', 'rbd-mirror', 'nfs', 'crash', 'iscsi']:
         return 'client.' + name
     elif daemon_type in ['mon', 'osd', 'mds', 'mgr', 'client']:
         return name
index 8f88b66d560af756da01d819bcb32346a557cf85..a02572a074e3c3ad7395156b73fd6a52846b80bc 100644 (file)
@@ -16,7 +16,7 @@ import errno
 
 from ceph.deployment import inventory
 from ceph.deployment.service_spec import ServiceSpec, NFSServiceSpec, RGWSpec, \
-    ServiceSpecValidationError
+    ServiceSpecValidationError, IscsiServiceSpec
 from ceph.deployment.drive_group import DriveGroupSpec
 
 from mgr_module import MgrModule, CLICommand, HandleCommandResult
@@ -855,6 +855,7 @@ class Orchestrator(object):
             'alertmanager': self.apply_alertmanager,
             'crash': self.apply_crash,
             'grafana': self.apply_grafana,
+            'iscsi': cast(Callable[[ServiceSpec], Completion], self.apply_iscsi),
             'mds': self.apply_mds,
             'mgr': self.apply_mgr,
             'mon': self.apply_mon,
@@ -1035,6 +1036,16 @@ class Orchestrator(object):
         """Update NFS cluster"""
         raise NotImplementedError()
 
+    def add_iscsi(self, spec):
+        # type: (IscsiServiceSpec) -> Completion
+        """Create iscsi daemon(s)"""
+        raise NotImplementedError()
+
+    def apply_iscsi(self, spec):
+        # type: (IscsiServiceSpec) -> Completion
+        """Update iscsi cluster"""
+        raise NotImplementedError()
+
     def add_prometheus(self, spec):
         # type: (ServiceSpec) -> Completion
         """Create new prometheus daemon"""
@@ -1271,12 +1282,12 @@ class DaemonDescription(object):
         if self.daemon_type == 'rgw':
             v = self.daemon_id.split('.')
             return '.'.join(v[0:2])
-        if self.daemon_type in ['mds', 'nfs']:
+        if self.daemon_type in ['mds', 'nfs', 'iscsi']:
             return self.daemon_id.split('.')[0]
         return self.daemon_type
 
     def service_name(self):
-        if self.daemon_type in ['rgw', 'mds', 'nfs']:
+        if self.daemon_type in ['rgw', 'mds', 'nfs', 'iscsi']:
             return f'{self.daemon_type}.{self.service_id()}'
         return self.daemon_type
 
index 60d84386d1316661512058c5e1a20dd3356a517c..d3dbf645fdfad29e37417a2fdd684204d730b7ac 100644 (file)
@@ -25,7 +25,7 @@ from ._interface import OrchestratorClientMixin, DeviceLightLoc, _cli_read_comma
     raise_if_exception, _cli_write_command, TrivialReadCompletion, OrchestratorError, \
     NoOrchestrator, OrchestratorValidationError, NFSServiceSpec, \
     RGWSpec, InventoryFilter, InventoryHost, HostSpec, CLICommandMeta, \
-    ServiceDescription
+    ServiceDescription, IscsiServiceSpec
 
 def nice_delta(now, t, suffix=''):
     if t:
@@ -590,6 +590,8 @@ Usage:
             completion = self.add_node_exporter(spec)
         elif daemon_type == 'prometheus':
             completion = self.add_prometheus(spec)
+        elif daemon_type == 'iscsi':
+            completion = self.add_iscsi(spec)
         else:
             raise OrchestratorValidationError(f'unknown daemon type `{daemon_type}`')
 
@@ -641,6 +643,39 @@ Usage:
         raise_if_exception(completion)
         return HandleCommandResult(stdout=completion.result_str())
 
+    @_cli_write_command(
+        'orch daemon add iscsi',
+        'name=pool,type=CephString '
+        'name=fqdn_enabled,type=CephString,req=false '
+        'name=trusted_ip_list,type=CephString,req=false '
+        'name=placement,type=CephString,req=false',
+        'Start iscsi daemon(s)')
+    def _iscsi_add(self, pool, fqdn_enabled=None, trusted_ip_list=None, placement=None, inbuf=None):
+        usage = """
+        Usage:
+          ceph orch daemon add iscsi -i <json_file>
+          ceph orch daemon add iscsi <pool>
+                """
+        if inbuf:
+            try:
+                iscsi_spec = IscsiServiceSpec.from_json(json.loads(inbuf))
+            except ValueError as e:
+                msg = 'Failed to read JSON input: {}'.format(str(e)) + usage
+                return HandleCommandResult(-errno.EINVAL, stderr=msg)
+        else:
+            iscsi_spec = IscsiServiceSpec(
+                service_id='iscsi',
+                pool=pool,
+                fqdn_enabled=fqdn_enabled,
+                trusted_ip_list=trusted_ip_list,
+                placement=PlacementSpec.from_string(placement),
+            )
+
+        completion = self.add_iscsi(iscsi_spec)
+        self._orchestrator_wait([completion])
+        raise_if_exception(completion)
+        return HandleCommandResult(stdout=completion.result_str())
+
     @_cli_write_command(
         'orch daemon add nfs',
         "name=svc_arg,type=CephString "
index bfcbdd73ffbc98436fd52fcdd9ba3e05d09d40d1..79490a3c9117244721457a7ed12a41f12ca9fd78 100644 (file)
@@ -344,7 +344,7 @@ class ServiceSpec(object):
     start the services.
 
     """
-    KNOWN_SERVICE_TYPES = 'alertmanager crash grafana mds mgr mon nfs ' \
+    KNOWN_SERVICE_TYPES = 'alertmanager crash grafana iscsi mds mgr mon nfs ' \
                           'node-exporter osd prometheus rbd-mirror rgw'.split()
 
     @classmethod
@@ -354,7 +354,8 @@ class ServiceSpec(object):
         ret = {
             'rgw': RGWSpec,
             'nfs': NFSServiceSpec,
-            'osd': DriveGroupSpec
+            'osd': DriveGroupSpec,
+            'iscsi': IscsiServiceSpec,
         }.get(service_type, cls)
         if ret == ServiceSpec and not service_type:
             raise ServiceSpecValidationError('Spec needs a "service_type" key.')
@@ -472,7 +473,7 @@ def servicespec_validate_add(self: ServiceSpec):
     # This must not be a method of ServiceSpec, otherwise you'll hunt
     # sub-interpreter affinity bugs.
     ServiceSpec.validate(self)
-    if self.service_type in ['mds', 'rgw', 'nfs'] and not self.service_id:
+    if self.service_type in ['mds', 'rgw', 'nfs', 'iscsi'] and not self.service_id:
         raise ServiceSpecValidationError('Cannot add Service: id required')
 
 
@@ -542,3 +543,39 @@ class RGWSpec(ServiceSpec):
             return 443
         else:
             return 80
+
+
+class IscsiServiceSpec(ServiceSpec):
+    def __init__(self, service_id, pool=None,
+                 placement=None,
+                 trusted_ip_list=None,
+                 fqdn_enabled=None,
+                 api_port=None,
+                 api_user=None,
+                 api_password=None,
+                 api_secure=None,
+                 ssl_cert=None,
+                 ssl_key=None,
+                 service_type='iscsi',
+                 unmanaged=False):
+        assert service_type == 'iscsi'
+        super(IscsiServiceSpec, self).__init__('iscsi', service_id=service_id,
+                                               placement=placement, unmanaged=unmanaged)
+
+        #: RADOS pool where ceph-iscsi config data is stored.
+        self.pool = pool
+        self.trusted_ip_list = trusted_ip_list
+        self.fqdn_enabled = fqdn_enabled
+        self.api_port = api_port
+        self.api_user = api_user
+        self.api_password = api_password
+        self.api_secure = api_secure
+        self.ssl_cert = ssl_cert
+        self.ssl_key = ssl_key
+
+    def validate_add(self):
+        servicespec_validate_add(self)
+
+        if not self.pool:
+            raise ServiceSpecValidationError(
+                'Cannot add ISCSI: No Pool specified')