git-server-git.apps.pok.os.sepia.ceph.com Git - teuthology.git/commitdiff
author    Andrew Schoen <aschoen@redhat.com>
          Mon, 24 Nov 2014 18:16:51 +0000 (12:16 -0600)
committer Andrew Schoen <aschoen@redhat.com>
          Sat, 6 Dec 2014 19:46:47 +0000 (13:46 -0600)

Creates a merge_configs function in teuthology.misc that will be used
by run and schedule; nuke will need this eventually as well.  The old
way of doing this was too tightly coupled with argparse.  Wrote some
tests around the new function; increased coverage on misc by 4%.

Random lint fixes as well.

Signed-off-by: Andrew Schoen <aschoen@redhat.com>
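
For reference, a minimal usage sketch of the new helper (not part of this
commit; the file names and YAML contents below are made up, but the merge
semantics follow the deep_merge tests added in test_misc.py):

    import yaml
    from teuthology.misc import merge_configs

    # Hypothetical config fragments; in practice these are the <conf_file>
    # paths handed to teuthology-schedule or teuthology run.
    with open('base.yaml', 'w') as f:
        yaml.safe_dump({'overrides': {'ceph': {'fs': 'xfs'}}}, f)
    with open('extra.yaml', 'w') as f:
        yaml.safe_dump({'overrides': {'ceph': {'fs': 'btrfs'}},
                        'roles': [['mon.a', 'osd.0']]}, f)

    # Nested dicts merge recursively, lists concatenate, and later files
    # win on conflicting keys.
    conf = merge_configs(['base.yaml', 'extra.yaml'])
    assert conf['overrides']['ceph']['fs'] == 'btrfs'
    assert conf['roles'] == [['mon.a', 'osd.0']]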
teuthology/misc.py
teuthology/schedule.py
teuthology/test/test_config.py
teuthology/test/test_misc.py

index 08f0e3b260dd397ef38055eed84dc3e5f628dd2b..48766a1f1203aeded40801aed06aa5192a26f385 100644 (file)
@@ -19,6 +19,7 @@ import yaml
 import json
 import re
 import tempfile
+import pprint
 
 from teuthology import safepath
 from teuthology.exceptions import (CommandCrashedError, CommandFailedError,
@@ -105,6 +106,24 @@ class MergeConfig(argparse.Action):
             deep_merge(config_dict, new)
 
 
+def merge_configs(config_paths):
+    """ Takes one or many paths to yaml config files and merges them
+        together, returning the result.
+    """
+    conf_dict = dict()
+    for conf_path in config_paths:
+        with file(conf_path) as partial_file:
+            partial_dict = yaml.safe_load(partial_file)
+        try:
+            conf_dict = deep_merge(conf_dict, partial_dict)
+        except Exception:
+            # TODO: Should this log as well?
+            pprint.pprint("failed to merge {0} into {1}".format(conf_dict, partial_dict))
+            raise
+
+    return conf_dict
+
+
 def get_testdir(ctx):
     """
     :returns: A test directory
@@ -183,7 +202,7 @@ def get_ceph_binary_url(package=None,
         arch=arch,
         format=format,
         dist=dist
-        )
+    )
 
     if sha1 is not None:
         assert branch is None, "cannot set both sha1 and branch"
@@ -258,7 +277,7 @@ def get_mons(roles, ips):
             addr = '{ip}:{port}'.format(
                 ip=ips[idx],
                 port=mon_ports[ips[idx]],
-                )
+            )
             mon_id += 1
             mons[role] = addr
     assert mons
@@ -275,18 +294,18 @@ def generate_caps(type_):
         osd=dict(
             mon='allow *',
             osd='allow *',
-            ),
+        ),
         mds=dict(
             mon='allow *',
             osd='allow *',
             mds='allow',
-            ),
+        ),
         client=dict(
             mon='allow rw',
             osd='allow rwx',
             mds='allow',
-            ),
-        )
+        ),
+    )
     for subsystem, capability in defaults[type_].items():
         yield '--cap'
         yield subsystem
@@ -315,7 +334,7 @@ def skeleton_config(ctx, roles, ips):
             if role.startswith('mds.'):
                 conf.setdefault(role, {})
                 if role.find('-s-') != -1:
-                    standby_mds = role[role.find('-s-')+3:]
+                    standby_mds = role[role.find('-s-') + 3:]
                     conf[role]['mds standby for name'] = standby_mds
     return conf
 
@@ -430,18 +449,18 @@ def create_simple_monmap(ctx, remote, conf):
         'monmaptool',
         '--create',
         '--clobber',
-        ]
+    ]
     for (name, addr) in addresses:
         args.extend(('--add', name, addr))
     args.extend([
         '--print',
         '{tdir}/monmap'.format(tdir=testdir),
-        ])
+    ])
 
     r = remote.run(
         args=args,
         stdout=StringIO()
-        )
+    )
     monmap_output = r.stdout.getvalue()
     fsid = re.search("generated fsid (.+)$",
                      monmap_output, re.MULTILINE).group(1)
@@ -462,9 +481,9 @@ def write_file(remote, path, data):
             '-c',
             'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
             path,
-            ],
+        ],
         stdin=data,
-        )
+    )
 
 
 def sudo_write_file(remote, path, data, perms=None, owner=None):
@@ -492,9 +511,9 @@ def sudo_write_file(remote, path, data, perms=None, owner=None):
             '-c',
             'import shutil, sys; shutil.copyfileobj(sys.stdin, file(sys.argv[1], "wb"))',
             path,
-            ] + owner_args + permargs,
+        ] + owner_args + permargs,
         stdin=data,
-        )
+    )
 
 
 def copy_file(from_remote, from_path, to_remote, to_path=None):
@@ -524,11 +543,11 @@ def move_file(remote, from_path, to_path, sudo=False):
         '-c',
         '\"%a\"',
         to_path
-        ])
+    ])
     proc = remote.run(
         args=args,
         stdout=StringIO(),
-        )
+    )
     perms = proc.stdout.getvalue().rstrip().strip('\"')
 
     args = []
@@ -539,11 +558,11 @@ def move_file(remote, from_path, to_path, sudo=False):
         '--',
         from_path,
         to_path,
-        ])
+    ])
     proc = remote.run(
         args=args,
         stdout=StringIO(),
-        )
+    )
 
     # reset the file back to the original permissions
     args = []
@@ -553,11 +572,11 @@ def move_file(remote, from_path, to_path, sudo=False):
         'chmod',
         perms,
         to_path,
-        ])
+    ])
     proc = remote.run(
         args=args,
         stdout=StringIO(),
-        )
+    )
 
 
 def delete_file(remote, path, sudo=False, force=False):
@@ -577,7 +596,7 @@ def delete_file(remote, path, sudo=False, force=False):
     remote.run(
         args=args,
         stdout=StringIO(),
-        )
+    )
 
 
 def remove_lines_from_file(remote, path, line_is_valid_test,
@@ -662,7 +681,7 @@ def create_file(remote, path, data="", permissions=str(644), sudo=False):
     remote.run(
         args=args,
         stdout=StringIO(),
-        )
+    )
     # now write out the data if any was passed in
     if "" != data:
         append_lines_to_file(remote, path, data, sudo)
@@ -742,9 +761,9 @@ def get_wwn_id_map(remote, devs):
                 'ls',
                 '-l',
                 '/dev/disk/by-id/wwn-*',
-                ],
+            ],
             stdout=StringIO(),
-            )
+        )
         stdout = r.stdout.getvalue()
     except Exception:
         log.error('Failed to get wwn devices! Using /dev/sd* devices...')
@@ -785,7 +804,7 @@ def get_scratch_devices(remote):
         r = remote.run(
             args=['ls', run.Raw('/dev/[sv]d?')],
             stdout=StringIO()
-            )
+        )
         devs = r.stdout.getvalue().strip().split('\n')
 
     # Remove root device (vm guests) from the disk list
@@ -836,10 +855,10 @@ def wait_until_healthy(ctx, remote):
                     '{tdir}/archive/coverage'.format(tdir=testdir),
                     'ceph',
                     'health',
-                    ],
+                ],
                 stdout=StringIO(),
                 logger=log.getChild('health'),
-                )
+            )
             out = r.stdout.getvalue()
             log.debug('Ceph health: %s', out.rstrip('\n'))
             if out.split(None, 1)[0] == 'HEALTH_OK':
@@ -859,10 +878,10 @@ def wait_until_osds_up(ctx, cluster, remote):
                 '{tdir}/archive/coverage'.format(tdir=testdir),
                 'ceph',
                 'osd', 'dump', '--format=json'
-                ],
+            ],
             stdout=StringIO(),
             logger=log.getChild('health'),
-            )
+        )
         out = r.stdout.getvalue()
         j = json.loads('\n'.join(out.split('\n')[1:]))
         up = len(j['osds'])
@@ -1060,18 +1079,18 @@ def get_valgrind_args(testdir, name, preamble, v):
             '--xml=yes',
             '--xml-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
             '--time-stamp=yes',
-            ]
+        ]
     else:
         extra_args = [
             'valgrind',
             '--suppressions={tdir}/valgrind.supp'.format(tdir=testdir),
             '--log-file={vdir}/{n}.log'.format(vdir=val_path, n=name),
             '--time-stamp=yes',
-            ]
+        ]
     args = [
         'cd', testdir,
         run.Raw('&&'),
-        ] + preamble + extra_args + v
+    ] + preamble + extra_args + v
     log.debug('running %s under valgrind with args %s', name, args)
     return args
 
index ea450ec1710aa1c9e627415dc667a0426aba9e79..35dae1a44f92f72689130d65d385fd6a3aedc0ab 100644 (file)
@@ -2,7 +2,7 @@ import pprint
 import yaml
 
 import teuthology.beanstalk
-from teuthology.misc import deep_merge, get_user
+from teuthology.misc import get_user, merge_configs
 from teuthology import report
 
 
@@ -30,15 +30,7 @@ def build_config(args):
     Given a dict of arguments, build a job config
     """
     config_paths = args.get('<conf_file>', list())
-    conf_dict = dict()
-    for conf_path in config_paths:
-        with file(conf_path) as partial_file:
-            partial_dict = yaml.safe_load(partial_file)
-        try:
-            conf_dict = deep_merge(conf_dict, partial_dict)
-        except Exception:
-            pprint.pprint("failed to merge {0} into {1}".format(conf_dict, partial_dict))
-            raise
+    conf_dict = merge_configs(config_paths)
     # strip out targets; the worker will allocate new ones when we run
     # the job with --lock.
     if 'targets' in conf_dict:
index 7d33f97317615f2a14c614e6d7be89bfc38d2d3e..727b4324110c84e1b6f6f72aee88e623071dab6a 100644 (file)
@@ -34,11 +34,12 @@ class TestYamlConfig(object):
         assert conf_obj.to_str() == in_str
 
     def test_update(self):
-        conf_obj = self.test_class()
+        conf_obj = self.test_class(dict())
         conf_obj.foo = 'foo'
         conf_obj.bar = 'bar'
         conf_obj.update(dict(bar='baz'))
-        assert conf_obj.to_dict() == dict(foo='foo', bar='baz')
+        assert conf_obj.foo == 'foo'
+        assert conf_obj.bar == 'baz'
 
     def test_delattr(self):
         conf_obj = self.test_class()
@@ -69,19 +70,6 @@ class TestTeuthologyConfig(TestYamlConfig):
         conf_obj.something = 'something else'
         assert conf_obj.something == 'something else'
 
-    def test_update(self):
-        """
-        This is slightly different thank TestYamlConfig.update() in that it
-        only tests what was updated - since to_dict() yields all values,
-        including defaults.
-        """
-        conf_obj = self.test_class()
-        conf_obj.foo = 'foo'
-        conf_obj.bar = 'bar'
-        conf_obj.update(dict(bar='baz'))
-        assert conf_obj.foo == 'foo'
-        assert conf_obj.bar == 'baz'
-
 
 class TestJobConfig(TestYamlConfig):
     def setup(self):
@@ -118,16 +106,3 @@ class TestFakeNamespace(TestYamlConfig):
         assert conf_obj.teuthology_config.archive_base
         assert not conf_obj.teuthology_config.automated_scheduling
         assert conf_obj.teuthology_config.ceph_git_base_url == 'https://github.com/ceph/'
-
-    def test_update(self):
-        """
-        This is slightly different thank TestYamlConfig.update() in that it
-        only tests what was updated - since to_dict() yields all values,
-        including defaults.
-        """
-        conf_obj = self.test_class(dict())
-        conf_obj.foo = 'foo'
-        conf_obj.bar = 'bar'
-        conf_obj.update(dict(bar='baz'))
-        assert conf_obj.foo == 'foo'
-        assert conf_obj.bar == 'baz'
index bb963dff98c01ba5ace50eb47b9ce7a009d8c2e0..7671006c3b0ff33d97301c1c778200c97bc127ee 100644 (file)
@@ -1,4 +1,7 @@
 import argparse
+
+import pytest
+from mock import patch
 from ..orchestra import cluster
 from .. import misc
 from ..config import config
@@ -16,8 +18,8 @@ def test_get_clients_simple():
     ctx.cluster = cluster.Cluster(
         remotes=[
             (remote, ['client.0', 'client.1'])
-            ],
-        )
+        ],
+    )
     g = misc.get_clients(ctx=ctx, roles=['client.1'])
     got = next(g)
     assert len(got) == 2
@@ -90,3 +92,59 @@ class TestHostnames(object):
         host = 'ubuntu@box1.example.com'
         result = misc.decanonicalize_hostname(host)
         assert result == 'box1'
+
+
+class TestMergeConfigs(object):
+    """ Tests merge_config and deep_merge in teuthology.misc """
+
+    @patch("yaml.safe_load")
+    @patch("__builtin__.file")
+    def test_merge_configs(self, m_file, m_safe_load):
+        """ Only tests with one yaml file being passed, mainly just to test
+            the loop logic.  The actual merge will be tested in subsequent
+            tests.
+        """
+        expected = {"a": "b", "b": "c"}
+        m_safe_load.return_value = expected
+        result = misc.merge_configs(["path/to/config1"])
+        assert result == expected
+        m_file.assert_called_once_with("path/to/config1")
+
+    def test_merge_configs_empty(self):
+        assert misc.merge_configs([]) == {}
+
+    def test_deep_merge(self):
+        a = {"a": "b"}
+        b = {"b": "c"}
+        result = misc.deep_merge(a, b)
+        assert result == {"a": "b", "b": "c"}
+
+    def test_overwrite_deep_merge(self):
+        a = {"a": "b"}
+        b = {"a": "overwritten", "b": "c"}
+        result = misc.deep_merge(a, b)
+        assert result == {"a": "overwritten", "b": "c"}
+
+    def test_list_deep_merge(self):
+        a = [1, 2]
+        b = [3, 4]
+        result = misc.deep_merge(a, b)
+        assert result == [1, 2, 3, 4]
+
+    def test_missing_list_deep_merge(self):
+        a = [1, 2]
+        b = "not a list"
+        with pytest.raises(AssertionError):
+            misc.deep_merge(a, b)
+
+    def test_missing_a_deep_merge(self):
+        result = misc.deep_merge(None, [1, 2])
+        assert result == [1, 2]
+
+    def test_missing_b_deep_merge(self):
+        result = misc.deep_merge([1, 2], None)
+        assert result == [1, 2]
+
+    def test_invalid_b_deep_merge(self):
+        with pytest.raises(AssertionError):
+            misc.deep_merge({"a": "b"}, "invalid")