Use misc.get_scratch_devices() when necessary
author     Zack Cerza <zack@redhat.com>
           Tue, 23 Feb 2016 18:18:19 +0000 (11:18 -0700)
committer  Zack Cerza <zack@redhat.com>
           Fri, 26 Feb 2016 17:01:45 +0000 (10:01 -0700)
That is, if osd_auto_discovery is disabled.

Signed-off-by: Zack Cerza <zack@redhat.com>
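
For context, a minimal sketch of the two task configurations this change
distinguishes (Python-dict form, matching the tests below; the configs
themselves are illustrative, not taken from the commit):

    # Illustrative only: with osd_auto_discovery set under 'vars', no per-host
    # 'devices' list is generated for the inventory.
    config_auto = dict(playbook=[], vars=dict(osd_auto_discovery=True))

    # With it unset (or False), get_scratch_devices() supplies 'devices' for
    # each host in the generated hosts file.
    config_devices = dict(playbook=[])
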
teuthology/task/ceph_ansible.py
teuthology/test/task/test_ceph_ansible.py

diff --git a/teuthology/task/ceph_ansible.py b/teuthology/task/ceph_ansible.py
index 447149ba7f876b2c04f83bee067dc9882d5b847e..bd32589581ede58982d518064a296560f7c4e5a5 100644
--- a/teuthology/task/ceph_ansible.py
+++ b/teuthology/task/ceph_ansible.py
@@ -1,3 +1,4 @@
+import json
 import os
 
 from cStringIO import StringIO
@@ -5,6 +6,7 @@ from cStringIO import StringIO
 from . import ansible
 
 from ..config import config as teuth_config
+from ..misc import get_scratch_devices
 
 
 class CephAnsible(ansible.Ansible):
@@ -46,6 +48,10 @@ class CephAnsible(ansible.Ansible):
         playbook: {playbook}
 
     It always uses a dynamic inventory.
+
+    It will optionally do the following automatically based on ``vars`` that
+    are passed in:
+        * Set ``devices`` for each host if ``osd_auto_discovery`` is not True
     """.format(
         git_base=teuth_config.ceph_git_base_url,
         playbook=_default_playbook,
@@ -78,20 +84,23 @@ class CephAnsible(ansible.Ansible):
             want = lambda role: role.startswith(role_prefix)
             for (remote, roles) in self.cluster.only(want).remotes.iteritems():
                 hostname = remote.hostname
+                host_vars = self.get_host_vars(remote)
                 if group not in hosts_dict:
-                    hosts_dict[group] = {hostname: None}
+                    hosts_dict[group] = {hostname: host_vars}
                 elif hostname not in hosts_dict[group]:
-                    hosts_dict[group][hostname] = None
+                    hosts_dict[group][hostname] = host_vars
 
-        print hosts_dict
         hosts_stringio = StringIO()
         for group in sorted(hosts_dict.keys()):
             hosts_stringio.write('[%s]\n' % group)
             for hostname in sorted(hosts_dict[group].keys()):
                 vars = hosts_dict[group][hostname]
                 if vars:
-                    vars_list = ['%s=%s' % (k, v)
-                                 for k, v in vars.iteritems()]
+                    vars_list = []
+                    for key in sorted(vars.keys()):
+                        vars_list.append(
+                            "%s='%s'" % (key, json.dumps(vars[key]).strip('"'))
+                        )
                     host_line = "{hostname} {vars}".format(
                         hostname=hostname,
                         vars=' '.join(vars_list),
@@ -104,4 +113,11 @@ class CephAnsible(ansible.Ansible):
         self.inventory = self._write_hosts_file(hosts_stringio.read().strip())
         self.generated_inventory = True
 
+    def get_host_vars(self, remote):
+        extra_vars = self.config.get('vars', dict())
+        host_vars = dict()
+        if not extra_vars.get('osd_auto_discovery', False):
+            host_vars['devices'] = get_scratch_devices(remote)
+        return host_vars
+
 task = CephAnsible
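
For reference, a standalone sketch (not part of the commit) of the host line
that the new serialization in generate_hosts_file() produces; the hostname and
device paths here are hypothetical:

    import json

    devices = ['/dev/sdb', '/dev/sdc']            # hypothetical scratch devices
    value = json.dumps(devices).strip('"')        # -> '["/dev/sdb", "/dev/sdc"]'
    # .strip('"') matters for scalar string vars, where json.dumps() adds
    # surrounding quotes; for list values like 'devices' it is a no-op.
    print("osd0.example.com devices='%s'" % value)
    # osd0.example.com devices='["/dev/sdb", "/dev/sdc"]'
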
diff --git a/teuthology/test/task/test_ceph_ansible.py b/teuthology/test/task/test_ceph_ansible.py
index 253e116eee6c36d6cee09523bcad20d079b7f7d2..8e71465b5c65a83293ca5e588ed5990dc3e244e8 100644
--- a/teuthology/test/task/test_ceph_ansible.py
+++ b/teuthology/test/task/test_ceph_ansible.py
@@ -37,9 +37,19 @@ class TestCephAnsibleTask(TestAnsibleTask):
         )
         self.patcher_fetch_repo.start()
 
+        def fake_get_scratch_devices(remote):
+            return ['/dev/%s' % remote.shortname]
+
+        self.patcher_get_scratch_devices = patch(
+            'teuthology.task.ceph_ansible.get_scratch_devices',
+            fake_get_scratch_devices,
+        )
+        self.patcher_get_scratch_devices.start()
+
     def stop_patchers(self):
         super(TestCephAnsibleTask, self).stop_patchers()
         self.patcher_fetch_repo.stop()
+        self.patcher_get_scratch_devices.stop()
 
     def test_playbook_none(self):
         skip(SKIP_IRRELEVANT)
@@ -55,7 +65,8 @@ class TestCephAnsibleTask(TestAnsibleTask):
 
     def test_generate_hosts_file(self):
         self.task_config.update(dict(
-            playbook=[]
+            playbook=[],
+            vars=dict(osd_auto_discovery=True),
         ))
         task = self.klass(self.ctx, self.task_config)
         hosts_file_path = '/my/hosts/file'
@@ -79,3 +90,30 @@ class TestCephAnsibleTask(TestAnsibleTask):
             '[osds]',
             'remote3',
         ])
+
+    def test_generate_hosts_file_with_devices(self):
+        self.task_config.update(dict(
+            playbook=[]
+        ))
+        task = self.klass(self.ctx, self.task_config)
+        hosts_file_path = '/my/hosts/file'
+        hosts_file_obj = StringIO()
+        hosts_file_obj.name = hosts_file_path
+        with patch.object(ansible, 'NamedTemporaryFile') as m_NTF:
+            m_NTF.return_value = hosts_file_obj
+            task.generate_hosts_file()
+            m_NTF.assert_called_once_with(prefix="teuth_ansible_hosts_",
+                                            delete=False)
+        assert task.generated_inventory is True
+        assert task.inventory == hosts_file_path
+        hosts_file_obj.seek(0)
+        assert hosts_file_obj.read() == '\n'.join([
+            '[mdss]',
+            'remote2 devices=\'["/dev/remote2"]\'',
+            '',
+            '[mons]',
+            'remote1 devices=\'["/dev/remote1"]\'',
+            '',
+            '[osds]',
+            'remote3 devices=\'["/dev/remote3"]\'',
+        ])