cephadm: move get_container_info out of cephadm.py 61761/head
author John Mulligan <jmulligan@redhat.com>
Tue, 11 Feb 2025 19:53:02 +0000 (14:53 -0500)
committer John Mulligan <jmulligan@redhat.com>
Tue, 11 Feb 2025 21:08:05 +0000 (16:08 -0500)
Now that get_container_info is clean of dependencies on functions and
classes that only exist in cephadm.py, we can move it.

I chose to move it to a new module, container_lookup.py, because I
didn't see a really good home for it: the other modules were either too
low level or didn't fit thematically.

Signed-off-by: John Mulligan <jmulligan@redhat.com>
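
For reference, a minimal usage sketch under the new layout (the caller,
daemon name, and print call are hypothetical illustrations, not part of
this commit):

    from cephadmlib.container_lookup import get_container_info

    def report_image(ctx) -> None:
        # by_name=True requires a full '<type>.<id>' daemon name
        cinfo = get_container_info(ctx, 'osd.0', by_name=True)
        if cinfo is not None:
            print(cinfo.image_name)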
src/cephadm/cephadm.py
src/cephadm/cephadmlib/container_lookup.py [new file with mode: 0644]
src/cephadm/tests/test_cephadm.py

diff --git a/src/cephadm/cephadm.py b/src/cephadm/cephadm.py
index 86f5bbf5d73a2c4f6bd310c1c9a9d5bfaa690cd6..562081dd518d5dda010cfb5ef590376dd9b28ce0 100755 (executable)
@@ -85,12 +85,10 @@ from cephadmlib.call_wrappers import (
     concurrent_tasks,
 )
 from cephadmlib.container_engines import (
-    ContainerInfo,
     Podman,
     check_container_engine,
     find_container_engine,
     normalize_container_id,
-    parsed_container_image_stats,
     parsed_container_mem_usage,
     pull_command,
     registry_login,
@@ -146,7 +144,6 @@ from cephadmlib.container_types import (
     InitContainer,
     SidecarContainer,
     extract_uid_gid,
-    get_container_stats,
     get_mgr_images,
     is_container_running,
 )
@@ -204,6 +201,7 @@ from cephadmlib.listing_updaters import (
     MemUsageStatusUpdater,
     VersionStatusUpdater,
 )
+from cephadmlib.container_lookup import get_container_info
 
 
 FuncT = TypeVar('FuncT', bound=Callable)
@@ -467,82 +465,6 @@ def update_default_image(ctx: CephadmContext) -> None:
         ctx.image = _get_default_image(ctx)
 
 
-def get_container_info(
-    ctx: CephadmContext, daemon_filter: str, by_name: bool
-) -> Optional[ContainerInfo]:
-    """
-    :param ctx: Cephadm context
-    :param daemon_filter: daemon name or type
-    :param by_name: must be set to True if a daemon name is provided
-    :return: Container information or None
-    """
-    if by_name and '.' not in daemon_filter:
-        logger.warning(
-            f'Trying to get container info using an invalid daemon name {daemon_filter}'
-        )
-        return None
-
-    # configure filters: fsid and (daemon name or daemon type)
-    kwargs = {
-        'fsid': ctx.fsid,
-        ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
-    }
-    # use keep_container_info to cache the ContainerInfo generated
-    # during the loop, hopefully avoiding a repeat of the same lookup
-    # immediately afterwards.
-    _cinfo_key = '_container_info'
-    _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
-    matching_daemons = [
-        _updater.expand(ctx, entry)
-        for entry in daemons_matching(ctx, **kwargs)
-    ]
-
-    if not matching_daemons:
-        # no matches at all
-        logger.debug(
-            'no daemons match: daemon_filter=%r, by_name=%r',
-            daemon_filter,
-            by_name,
-        )
-        return None
-    if by_name and len(matching_daemons) > 1:
-        # too many matches while searching by name
-        logger.warning(
-            f'Found multiple daemons sharing the same name: {daemon_filter}'
-        )
-        # Prefer to take the first daemon we find that is actually running, or
-        # just the first in the list if none are running
-        # (key reminder: False (0) sorts before True (1))
-        matching_daemons = sorted(
-            matching_daemons, key=lambda d: d.get('state') != 'running'
-        )
-
-    matched_daemon = matching_daemons[0]
-    is_running = matched_daemon.get('state') == 'running'
-    image_name = matched_daemon.get('container_image_name', '')
-    if is_running:
-        cinfo = matched_daemon.get(_cinfo_key)
-        if cinfo:
-            # found a cached ContainerInfo while getting daemon statuses
-            return cinfo
-        return get_container_stats(
-            ctx,
-            DaemonIdentity.from_name(
-                matched_daemon['fsid'], matched_daemon['name']
-            ),
-        )
-    elif image_name:
-        # This daemon's container is not running, so the regular container
-        # inspect command will not work; fall back to inspecting the container image.
-        assert isinstance(image_name, str)
-        return parsed_container_image_stats(ctx, image_name)
-    # not running, but no image name to look up!
-    logger.debug(
-        'bad daemon state: no image, not running: %r', matched_daemon
-    )
-    return None
-
-
 def infer_local_ceph_image(ctx: CephadmContext, container_path: str) -> Optional[str]:
     """
      Infer the local ceph image based on the following priority criteria:
diff --git a/src/cephadm/cephadmlib/container_lookup.py b/src/cephadm/cephadmlib/container_lookup.py
new file mode 100644 (file)
index 0000000..a45698b
--- /dev/null
@@ -0,0 +1,91 @@
+# container_lookup.py - high-level functions for getting container info
+
+import logging
+
+from typing import Optional
+
+from .container_engines import ContainerInfo, parsed_container_image_stats
+from .container_types import get_container_stats
+from .context import CephadmContext
+from .daemon_identity import DaemonIdentity
+from .listing import daemons_matching
+from .listing_updaters import CoreStatusUpdater
+
+
+logger = logging.getLogger()
+
+
+def get_container_info(
+    ctx: CephadmContext, daemon_filter: str, by_name: bool
+) -> Optional[ContainerInfo]:
+    """
+    :param ctx: Cephadm context
+    :param daemon_filter: daemon name or type
+    :param by_name: must be set to True if a daemon name is provided
+    :return: Container information or None
+    """
+    if by_name and '.' not in daemon_filter:
+        logger.warning(
+            f'Trying to get container info using an invalid daemon name {daemon_filter}'
+        )
+        return None
+
+    # configure filters: fsid and (daemon name or daemon type)
+    kwargs = {
+        'fsid': ctx.fsid,
+        ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
+    }
+    # use keep_container_info to cache the ContainerInfo generated
+    # during the loop, hopefully avoiding a repeat of the same lookup
+    # immediately afterwards.
+    _cinfo_key = '_container_info'
+    _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
+    matching_daemons = [
+        _updater.expand(ctx, entry)
+        for entry in daemons_matching(ctx, **kwargs)
+    ]
+
+    if not matching_daemons:
+        # no matches at all
+        logger.debug(
+            'no daemons match: daemon_filter=%r, by_name=%r',
+            daemon_filter,
+            by_name,
+        )
+        return None
+    if by_name and len(matching_daemons) > 1:
+        # too many matches while searching by name
+        logger.warning(
+            f'Found multiple daemons sharing the same name: {daemon_filter}'
+        )
+        # Prefer to take the first daemon we find that is actually running, or
+        # just the first in the list if none are running
+        # (key reminder: False (0) sorts before True (1))
+        matching_daemons = sorted(
+            matching_daemons, key=lambda d: d.get('state') != 'running'
+        )
+
+    matched_daemon = matching_daemons[0]
+    is_running = matched_daemon.get('state') == 'running'
+    image_name = matched_daemon.get('container_image_name', '')
+    if is_running:
+        cinfo = matched_daemon.get(_cinfo_key)
+        if cinfo:
+            # found a cached ContainerInfo while getting daemon statuses
+            return cinfo
+        return get_container_stats(
+            ctx,
+            DaemonIdentity.from_name(
+                matched_daemon['fsid'], matched_daemon['name']
+            ),
+        )
+    elif image_name:
+        # This daemon's container is not running, so the regular container
+        # inspect command will not work; fall back to inspecting the container image.
+        assert isinstance(image_name, str)
+        return parsed_container_image_stats(ctx, image_name)
+    # not running, but no image name to look up!
+    logger.debug(
+        'bad daemon state: no image, not running: %r', matched_daemon
+    )
+    return None
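
The multiple-match handling above leans on the fact that in a sort key
False (0) orders before True (1), so running daemons come first; a
minimal standalone illustration (the daemon dicts are hypothetical, not
from this commit):

    daemons = [
        {'name': 'osd.0', 'state': 'stopped'},
        {'name': 'osd.0', 'state': 'running'},
    ]
    # entries with state == 'running' sort first, because for them the
    # key (d.get('state') != 'running') evaluates to False, i.e. 0
    daemons = sorted(daemons, key=lambda d: d.get('state') != 'running')
    assert daemons[0]['state'] == 'running'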
diff --git a/src/cephadm/tests/test_cephadm.py b/src/cephadm/tests/test_cephadm.py
index 0bf9f492fb83d68a5e7063d0cd52e92357ddc6d6..d5dcac62cc90f51f8a761cf9a9ae7a387d8805f4 100644 (file)
@@ -39,6 +39,13 @@ def get_ceph_conf(
         mon_host = {mon_host}
 '''
 
+def _container_info(*args, **kwargs):
+    """Create a ContainerInfo, deferring the cephadmlib import to call time."""
+    import cephadmlib.container_engines
+
+    return cephadmlib.container_engines.ContainerInfo(*args, **kwargs)
+
+
 @contextlib.contextmanager
 def bootstrap_test_ctx(*args, **kwargs):
     with with_cephadm_ctx(*args, **kwargs) as ctx:
@@ -607,7 +614,7 @@ class TestCephAdm(object):
         ctx.container_engine = mock_podman()
 
         # make sure the right image is selected when container is found
-        cinfo = _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+        cinfo = _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
                                  'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
                                  '514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d',
                                  '2022-04-19 13:45:20.97146228 +0000 UTC',
@@ -652,7 +659,7 @@ class TestCephAdm(object):
                 ],
                 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
                  ),
-                _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+                _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
                                  'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
                                  '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
                                  '2022-04-19 13:45:20.97146228 +0000 UTC',
@@ -668,7 +675,7 @@ class TestCephAdm(object):
                 ],
                 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
                  ),
-                _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+                _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
                                  'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
                                  '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
                                  '2022-04-19 13:45:20.97146228 +0000 UTC',
@@ -684,7 +691,7 @@ class TestCephAdm(object):
                 ],
                 ("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
                  ),
-                _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+                _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
                                  'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
                                  '666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
                                  '2022-04-19 13:45:20.97146228 +0000 UTC',
@@ -813,7 +820,7 @@ class TestCephAdm(object):
             ValueError
         )
         cinfo = (
-            _cephadm.ContainerInfo(*container_stats)
+            _container_info(*container_stats)
             if container_stats
             else None
         )
@@ -884,7 +891,7 @@ class TestCephAdm(object):
         _daemons.side_effect = _dms
         _current_daemons[:] = [down_osd_json]
 
-        expected_container_info = _cephadm.ContainerInfo(
+        expected_container_info = _container_info(
             container_id='',
             image_name='quay.io/adk3798/ceph@sha256:7da0af22ce45aac97dff00125af590506d8e36ab97d78e5175149643562bfb0b',
             image_id='a03c201ff4080204949932f367545cd381c4acee0d48dbc15f2eac1e35f22318',
@@ -904,10 +911,10 @@ class TestCephAdm(object):
         # than it partially being taken from the list_daemons output
         up_osd_json = copy.deepcopy(down_osd_json)
         up_osd_json['state'] = 'running'
-        _get_stats.return_value = _cephadm.ContainerInfo('container_id', 'image_name','image_id','the_past','')
+        _get_stats.return_value = _container_info('container_id', 'image_name', 'image_id', 'the_past', '')
         _current_daemons[:] = [down_osd_json, up_osd_json]
 
-        expected_container_info = _cephadm.ContainerInfo(
+        expected_container_info = _container_info(
             container_id='container_id',
             image_name='image_name',
             image_id='image_id',