concurrent_tasks,
)
from cephadmlib.container_engines import (
- ContainerInfo,
Podman,
check_container_engine,
find_container_engine,
normalize_container_id,
- parsed_container_image_stats,
parsed_container_mem_usage,
pull_command,
registry_login,
InitContainer,
SidecarContainer,
extract_uid_gid,
- get_container_stats,
get_mgr_images,
is_container_running,
)
MemUsageStatusUpdater,
VersionStatusUpdater,
)
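+# get_container_info was moved to cephadmlib.container_lookup; importing it
+# here keeps the existing name in this module working for current callers.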
+from cephadmlib.container_lookup import get_container_info
FuncT = TypeVar('FuncT', bound=Callable)
ctx.image = _get_default_image(ctx)
-def get_container_info(
- ctx: CephadmContext, daemon_filter: str, by_name: bool
-) -> Optional[ContainerInfo]:
- """
- :param ctx: Cephadm context
- :param daemon_filter: daemon name or type
- :param by_name: must be set to True if daemon name is provided
- :return: Container information or None
- """
- if by_name and '.' not in daemon_filter:
- logger.warning(
- f'Trying to get container info using invalid daemon name {daemon_filter}'
- )
- return None
-
- # configure filters: fsid and (daemon name or daemon type)
- kwargs = {
- 'fsid': ctx.fsid,
- ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
- }
- # use keep_container_info to cache the ContainerInfo generated
- # during the loop and hopefully avoid having to perform the same
- # lookup right away.
- _cinfo_key = '_container_info'
- _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
- matching_daemons = [
- _updater.expand(ctx, entry)
- for entry in daemons_matching(ctx, **kwargs)
- ]
-
- if not matching_daemons:
- # no matches at all
- logger.debug(
- 'no daemons match: daemon_filter=%r, by_name=%r',
- daemon_filter,
- by_name,
- )
- return None
- if by_name and len(matching_daemons) > 1:
- # too many matches while searching by name
- logger.warning(
- f'Found multiple daemons sharing same name: {daemon_filter}'
- )
- # Prefer to take the first daemon we find that is actually running, or
- # just the first in the list if none are running
- # (key reminder: false (0) sorts before true (1))
- matching_daemons = sorted(
- matching_daemons, key=lambda d: d.get('state') != 'running'
- )
-
- matched_deamon = matching_daemons[0]
- is_running = matched_deamon.get('state') == 'running'
- image_name = matched_deamon.get('container_image_name', '')
- if is_running:
- cinfo = matched_deamon.get(_cinfo_key)
- if cinfo:
- # found a cached ContainerInfo while getting daemon statuses
- return cinfo
- return get_container_stats(
- ctx,
- DaemonIdentity.from_name(
- matched_deamon['fsid'], matched_deamon['name']
- ),
- )
- elif image_name:
- # this daemon's container is not running. the regular container inspect
- # command will not work. Fall back to inspecting the container image
- assert isinstance(image_name, str)
- return parsed_container_image_stats(ctx, image_name)
- # not running, but no image name to look up!
- logger.debug(
- 'bad daemon state: no image, not running: %r', matched_deamon
- )
- return None
-
-
def infer_local_ceph_image(ctx: CephadmContext, container_path: str) -> Optional[str]:
"""
Infer the local ceph image based on the following priority criteria:
--- /dev/null
+# container_lookup.py - high-level functions for getting container info
+
+import logging
+
+from typing import Optional
+
+from .container_engines import ContainerInfo, parsed_container_image_stats
+from .container_types import get_container_stats
+from .context import CephadmContext
+from .daemon_identity import DaemonIdentity
+from .listing import daemons_matching
+from .listing_updaters import CoreStatusUpdater
+
+
+logger = logging.getLogger()
+
+
+def get_container_info(
+ ctx: CephadmContext, daemon_filter: str, by_name: bool
+) -> Optional[ContainerInfo]:
+ """
+ :param ctx: Cephadm context
+ :param daemon_filter: daemon name or type
+ :param by_name: must be set to True if daemon name is provided
+ :return: Container information or None
+ """
+    if by_name and '.' not in daemon_filter:
+        logger.warning(
+            f'Trying to get container info using an invalid daemon name: {daemon_filter}'
+        )
+        return None
+
+ # configure filters: fsid and (daemon name or daemon type)
+ kwargs = {
+ 'fsid': ctx.fsid,
+ ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
+ }
+    # use keep_container_info to cache any ContainerInfo generated
+    # during the loop, so we can avoid repeating the same lookup
+    # immediately afterwards.
+ _cinfo_key = '_container_info'
+ _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
+ matching_daemons = [
+ _updater.expand(ctx, entry)
+ for entry in daemons_matching(ctx, **kwargs)
+ ]
+
+ if not matching_daemons:
+ # no matches at all
+ logger.debug(
+ 'no daemons match: daemon_filter=%r, by_name=%r',
+ daemon_filter,
+ by_name,
+ )
+ return None
+    if by_name and len(matching_daemons) > 1:
+        # too many matches while searching by name
+        logger.warning(
+            f'Found multiple daemons sharing the same name: {daemon_filter}'
+        )
+    # Prefer the first daemon that is actually running, falling back to
+    # the first in the list if none are running
+    # (reminder: False (0) sorts before True (1))
+    matching_daemons = sorted(
+        matching_daemons, key=lambda d: d.get('state') != 'running'
+    )
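+    # e.g. a daemon with state 'running' yields key False (0) and sorts
+    # ahead of any non-running daemon, whose key is True (1)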
+
+    matched_daemon = matching_daemons[0]
+    is_running = matched_daemon.get('state') == 'running'
+    image_name = matched_daemon.get('container_image_name', '')
+    if is_running:
+        cinfo = matched_daemon.get(_cinfo_key)
+        if cinfo:
+            # found a cached ContainerInfo while getting daemon statuses
+            return cinfo
+        return get_container_stats(
+            ctx,
+            DaemonIdentity.from_name(
+                matched_daemon['fsid'], matched_daemon['name']
+            ),
+        )
+    elif image_name:
+        # This daemon's container is not running, so the regular container
+        # inspect command will not work. Fall back to inspecting the
+        # container image instead.
+        assert isinstance(image_name, str)
+        return parsed_container_image_stats(ctx, image_name)
+    # not running, and no image name to look up!
+    logger.debug(
+        'bad daemon state: no image, not running: %r', matched_daemon
+    )
+    return None
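+
+
+# Example usage (illustrative; assumes a CephadmContext with fsid set,
+# and a daemon name such as 'osd.0'):
+#
+#     cinfo = get_container_info(ctx, 'osd.0', by_name=True)
+#     if cinfo is not None:
+#         logger.info('%s uses image %s', cinfo.container_id, cinfo.image_name)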
mon_host = {mon_host}
'''
+def _container_info(*args, **kwargs):
+ """Wrapper function for creating container info instances."""
+ import cephadmlib.container_engines
+
+ return cephadmlib.container_engines.ContainerInfo(*args, **kwargs)
+
+
@contextlib.contextmanager
def bootstrap_test_ctx(*args, **kwargs):
with with_cephadm_ctx(*args, **kwargs) as ctx:
ctx.container_engine = mock_podman()
# make sure the right image is selected when container is found
- cinfo = _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ cinfo = _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
'514e6a882f6e74806a5856468489eeff8d7106095557578da96935e4d0ba4d9d',
'2022-04-19 13:45:20.97146228 +0000 UTC',
],
("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
),
- _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
'666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
'2022-04-19 13:45:20.97146228 +0000 UTC',
],
("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
),
- _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
'666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
'2022-04-19 13:45:20.97146228 +0000 UTC',
],
("935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972", "registry.hub.docker.com/rkachach/ceph:custom-v0.5", "666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4", "2022-04-19 13:45:20.97146228 +0000 UTC", ""
),
- _cephadm.ContainerInfo('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
+ _container_info('935b549714b8f007c6a4e29c758689cf9e8e69f2e0f51180506492974b90a972',
'registry.hub.docker.com/rkachach/ceph:custom-v0.5',
'666bbfa87e8df05702d6172cae11dd7bc48efb1d94f1b9e492952f19647199a4',
'2022-04-19 13:45:20.97146228 +0000 UTC',
ValueError
)
cinfo = (
- _cephadm.ContainerInfo(*container_stats)
+ _container_info(*container_stats)
if container_stats
else None
)
_daemons.side_effect = _dms
_current_daemons[:] = [down_osd_json]
- expected_container_info = _cephadm.ContainerInfo(
+ expected_container_info = _container_info(
container_id='',
image_name='quay.io/adk3798/ceph@sha256:7da0af22ce45aac97dff00125af590506d8e36ab97d78e5175149643562bfb0b',
image_id='a03c201ff4080204949932f367545cd381c4acee0d48dbc15f2eac1e35f22318',
# than it partially being taken from the list_daemons output
up_osd_json = copy.deepcopy(down_osd_json)
up_osd_json['state'] = 'running'
- _get_stats.return_value = _cephadm.ContainerInfo('container_id', 'image_name','image_id','the_past','')
+        _get_stats.return_value = _container_info('container_id', 'image_name', 'image_id', 'the_past', '')
_current_daemons[:] = [down_osd_json, up_osd_json]
- expected_container_info = _cephadm.ContainerInfo(
+ expected_container_info = _container_info(
container_id='container_id',
image_name='image_name',
image_id='image_id',