ctx.image = _get_default_image(ctx)
-def get_container_info(ctx: CephadmContext, daemon_filter: str, by_name: bool) -> Optional[ContainerInfo]:
+def get_container_info(
+ ctx: CephadmContext, daemon_filter: str, by_name: bool
+) -> Optional[ContainerInfo]:
"""
:param ctx: Cephadm context
:param daemon_filter: daemon name or type
:param by_name: must be set to True if daemon name is provided
:return: Container information or None
"""
- def daemon_name_or_type(daemon: Dict[str, str]) -> str:
- return daemon['name'] if by_name else daemon['name'].split('.', 1)[0]
-
if by_name and '.' not in daemon_filter:
- logger.warning(f'Trying to get container info using invalid daemon name {daemon_filter}')
+ logger.warning(
+ f'Trying to get container info using invalid daemon name {daemon_filter}'
+ )
return None
- if by_name:
- # NOTE: we are not passing detail=False to this list_daemons call
- # as we want the container_image name in the case where we are
- # doing this by name and this is skipped when detail=False
- matching_daemons = list_daemons(ctx, daemon_name=daemon_filter)
- if len(matching_daemons) > 1:
- logger.warning(f'Found multiple daemons sharing same name: {daemon_filter}')
- # Take the first daemon we find that is actually running, or just the
- # first in the list if none are running
- matched_daemon = None
- for d in matching_daemons:
- if 'state' in d and d['state'] == 'running':
- matched_daemon = d
- break
- if not matched_daemon:
- matched_daemon = matching_daemons[0]
- matching_daemons = [matched_daemon]
- else:
- # NOTE: we are passing detail=False here as in this case where we are not
- # doing it by_name, we really only need the names of the daemons. Additionally,
- # when not doing it by_name, we are getting the info for all daemons on the
- # host, and doing this with detail=True tends to be slow.
- daemons = list_daemons(ctx, detail=False)
- matching_daemons = [d for d in daemons if daemon_name_or_type(d) == daemon_filter and d['fsid'] == ctx.fsid]
- if matching_daemons:
- if (
- by_name
- and 'state' in matching_daemons[0]
- and matching_daemons[0]['state'] != 'running'
- and 'container_image_name' in matching_daemons[0]
- and matching_daemons[0]['container_image_name']
- ):
- # this daemon contianer is not running so the regular `podman/docker inspect` on the
- # container will not help us. If we have the image name from the list_daemons output
- # we can try that.
- image_name = matching_daemons[0]['container_image_name']
- cinfo = parsed_container_image_stats(ctx, image_name)
- if cinfo:
- return cinfo
- else:
- d_type, d_id = matching_daemons[0]['name'].split('.', 1)
- cinfo = get_container_stats(
- ctx, DaemonIdentity(ctx.fsid, d_type, d_id)
- )
- if cinfo:
- return cinfo
+
+ # configure filters: fsid and (daemon name or daemon type)
+ kwargs = {
+ 'fsid': ctx.fsid,
+ ('daemon_name' if by_name else 'daemon_type'): daemon_filter,
+ }
+ # use keep_container_info to cache the ContainerInfo generated
+ # during the loop and hopefully avoid having to perform the same
+ # lookup right away.
+ _cinfo_key = '_container_info'
+ _updater = CoreStatusUpdater(keep_container_info=_cinfo_key)
+ matching_daemons = [
+ _updater.expand(ctx, entry)
+ for entry in daemons_matching(ctx, **kwargs)
+ ]
+
+ if not matching_daemons:
+ # no matches at all
+ logger.debug(
+ 'no daemons match: daemon_filter=%r, by_name=%r',
+ daemon_filter,
+ by_name,
+ )
+ return None
+ if by_name and len(matching_daemons) > 1:
+ # too many matches while searching by name
+ logger.warning(
+ f'Found multiple daemons sharing same name: {daemon_filter}'
+ )
+    # Prefer the first daemon we find that is actually running, or just the
+    # first in the list if none are running. The sort key puts running
+    # daemons first because False (0) sorts before True (1).
+ matching_daemons = sorted(
+ matching_daemons, key=lambda d: d.get('state') != 'running'
+ )
+
+    matched_daemon = matching_daemons[0]
+    is_running = matched_daemon.get('state') == 'running'
+    image_name = matched_daemon.get('container_image_name', '')
+ if is_running:
+        cinfo = matched_daemon.get(_cinfo_key)
+ if cinfo:
+ # found a cached ContainerInfo while getting daemon statuses
+ return cinfo
+ return get_container_stats(
+ ctx,
+ DaemonIdentity.from_name(
+                matched_daemon['fsid'], matched_daemon['name']
+ ),
+ )
+ elif image_name:
+        # This daemon's container is not running, so the regular container
+        # inspect command will not work. Fall back to inspecting the
+        # container image instead.
+ assert isinstance(image_name, str)
+ return parsed_container_image_stats(ctx, image_name)
+ # not running, but no image name to look up!
+ logger.debug(
+        'bad daemon state: no image, not running: %r', matched_daemon
+ )
return None
output,
funkypatch,
):
+ import cephadmlib.listing
+ import cephadmlib.daemon_identity
+
ctx = _cephadm.CephadmContext()
ctx.fsid = '00000000-0000-0000-0000-0000deadbeef'
ctx.container_engine = mock_podman()
- funkypatch.patch('cephadm.list_daemons').return_value = daemon_list
+
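+    # fake cephadmlib.listing.daemons: yield a DaemonEntry for each dict in
+    # the daemon_list fixture so get_container_info can iterate over them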
+ def _dms(*args, **kwargs):
+ for val in daemon_list:
+ yield cephadmlib.listing.DaemonEntry(
+ cephadmlib.daemon_identity.DaemonIdentity.from_name(
+ val['fsid'], val['name']
+ ),
+ val,
+ '',
+ )
+
+ funkypatch.patch('cephadmlib.listing.daemons').side_effect = _dms
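+    # systemd reports the unit as running only when container stats are
+    # expected; any stray subprocess call raises immediately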
+ funkypatch.patch('cephadmlib.systemd.check_unit').return_value = (
+ True,
+ 'running' if container_stats else 'stopped',
+ '',
+ )
+ funkypatch.patch('cephadmlib.call_wrappers.call').side_effect = (
+ ValueError
+ )
cinfo = (
_cephadm.ContainerInfo(*container_stats)
if container_stats
else None
)
def test_get_container_info_daemon_down(self, funkypatch):
+ import cephadmlib.listing
+ import cephadmlib.daemon_identity
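+        # the unit is reported as stopped; any real subprocess call fails fast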
+ funkypatch.patch('cephadmlib.systemd.check_unit').return_value = (True, 'stopped', '')
+ funkypatch.patch('cephadmlib.call_wrappers.call').side_effect = ValueError
_get_stats_by_name = funkypatch.patch('cephadmlib.container_engines.parsed_container_image_stats')
_get_stats = funkypatch.patch('cephadmlib.container_types.get_container_stats')
- _list_daemons = funkypatch.patch('cephadm.list_daemons')
+ _daemons = funkypatch.patch('cephadmlib.listing.daemons')
+
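+    # stand in for CoreStatusUpdater with a no-op updater so the daemon
+    # dicts supplied by the test are used as-is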
+ class FakeUpdater(cephadmlib.listing.NoOpDaemonStatusUpdater):
+ def __init__(self, *args, **kwargs):
+ pass
+ funkypatch.patch('cephadmlib.listing_updaters.CoreStatusUpdater', dest=FakeUpdater)
ctx = _cephadm.CephadmContext()
ctx.fsid = '5e39c134-dfc5-11ee-a344-5254000ee071'
"deployed": "2024-03-11T17:37:23.520061Z",
"configured": "2024-03-11T17:37:28.494075Z"
}
- _list_daemons.return_value = [down_osd_json]
+
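+        # _current_daemons lets each phase of the test swap in its own
+        # daemon list while reusing the same fake listing.daemons generator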
+ _current_daemons = []
+ def _dms(*args, **kwargs):
+ for d in _current_daemons:
+ ident = cephadmlib.daemon_identity.DaemonIdentity.from_name(d['fsid'], d['name'])
+ yield cephadmlib.listing.DaemonEntry(ident, d, '')
+ _daemons.side_effect = _dms
+ _current_daemons[:] = [down_osd_json]
expected_container_info = _cephadm.ContainerInfo(
container_id='',
up_osd_json = copy.deepcopy(down_osd_json)
up_osd_json['state'] = 'running'
_get_stats.return_value = _cephadm.ContainerInfo('container_id', 'image_name','image_id','the_past','')
- _list_daemons.return_value = [down_osd_json, up_osd_json]
+ _current_daemons[:] = [down_osd_json, up_osd_json]
expected_container_info = _cephadm.ContainerInfo(
container_id='container_id',