git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
mgr/cephadm: upgrade: Make use of repo_digest
author: Sebastian Wagner <sebastian.wagner@suse.com>
Mon, 3 Aug 2020 13:51:53 +0000 (15:51 +0200)
committer: Sebastian Wagner <sebastian.wagner@suse.com>
Wed, 2 Sep 2020 08:13:04 +0000 (10:13 +0200)
* Automatically convert tags like `:latest` to the digest
* Use the digest instead of the tag

Signed-off-by: Sebastian Wagner <sebastian.wagner@suse.com>
src/pybind/mgr/cephadm/tests/fixtures.py
src/pybind/mgr/cephadm/tests/test_upgrade.py
src/pybind/mgr/cephadm/upgrade.py
src/pybind/mgr/tests/__init__.py

index b70469cbbefdc562fcfd25ae5d3791335e65b788..8e6adc9678d2baffe655e836e5f50e787ffa82ab 100644 (file)
@@ -24,7 +24,7 @@ def get_ceph_option(_, key):
 
 def _run_cephadm(ret):
     def foo(*args, **kwargs):
-        return ret, '', 0
+        return [ret], '', 0
     return foo
 
 
index 60ed7c13fffce0288576a9fa0c3323737c7af87c..079ec5c4c139346d82205e02cbe166fafa80ccb2 100644 (file)
@@ -1,6 +1,8 @@
 import json
 from unittest import mock
 
+import pytest
+
 from ceph.deployment.service_spec import ServiceSpec
 from cephadm import CephadmOrchestrator
 from .fixtures import _run_cephadm, wait, cephadm_module, with_host, with_service
@@ -23,9 +25,16 @@ def test_upgrade_start(cephadm_module: CephadmOrchestrator):
 
 
 @mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm('{}'))
-def test_upgrade_run(cephadm_module: CephadmOrchestrator):
+@pytest.mark.parametrize("use_repo_digest",
+                         [
+                             False,
+                             True
+                         ])
+def test_upgrade_run(use_repo_digest, cephadm_module: CephadmOrchestrator):
     with with_host(cephadm_module, 'test'):
         cephadm_module.set_container_image('global', 'from_image')
+        if use_repo_digest:
+            cephadm_module.use_repo_digest = True
         with with_service(cephadm_module, ServiceSpec('mgr'), CephadmOrchestrator.apply_mgr, 'test'):
             assert wait(cephadm_module, cephadm_module.upgrade_start(
                 'to_image', None)) == 'Initiating upgrade to to_image'
@@ -41,6 +50,30 @@ def test_upgrade_run(cephadm_module: CephadmOrchestrator):
 
             cephadm_module._mon_command_mock_versions = _versions_mock
 
+            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(json.dumps({
+                'image_id': 'image_id',
+                'repo_digest': 'to_image@repo_digest',
+            }))):
+
+                cephadm_module.upgrade._do_upgrade()
+
+            assert cephadm_module.upgrade_status is not None
+
+            with mock.patch("cephadm.module.CephadmOrchestrator._run_cephadm", _run_cephadm(
+                json.dumps([
+                    dict(
+                        name=list(cephadm_module.cache.daemons['test'].keys())[0],
+                        style='cephadm',
+                        fsid='fsid',
+                        container_id='container_id',
+                        container_image_id='image_id',
+                        version='version',
+                        state='running',
+                    )
+                ])
+            )):
+                cephadm_module._refresh_hosts_and_daemons()
+
             cephadm_module.upgrade._do_upgrade()
 
             _, image, _ = cephadm_module.check_mon_command({
@@ -48,5 +81,7 @@ def test_upgrade_run(cephadm_module: CephadmOrchestrator):
                 'who': 'global',
                 'key': 'container_image',
             })
-
-            assert image == 'to_image'
+            if use_repo_digest:
+                assert image == 'to_image@repo_digest'
+            else:
+                assert image == 'to_image'
index 2b5759514eddda0573526f4bb02a16cba6200bcc..804f7df8760d4a87bca3fdee1c7d30b6717377db 100644 (file)
@@ -2,7 +2,7 @@ import json
 import logging
 import time
 import uuid
-from typing import TYPE_CHECKING, Optional, Dict
+from typing import TYPE_CHECKING, Optional, Dict, NamedTuple
 
 import orchestrator
 from cephadm.utils import name_to_config_section
@@ -24,22 +24,25 @@ class UpgradeState:
                  target_name: str,
                  progress_id: str,
                  target_id: Optional[str] = None,
+                 repo_digest: Optional[str] = None,
                  target_version: Optional[str] = None,
                  error: Optional[str] = None,
                  paused: Optional[bool] = None,
                  ):
-        self.target_name: str = target_name
+        self._target_name: str = target_name  # Use CephadmUpgrade.target_image instead.
         self.progress_id: str = progress_id
         self.target_id: Optional[str] = target_id
+        self.repo_digest: Optional[str] = repo_digest
         self.target_version: Optional[str] = target_version
         self.error: Optional[str] = error
         self.paused: bool = paused or False
 
     def to_json(self) -> dict:
         return {
-            'target_name': self.target_name,
+            'target_name': self._target_name,
             'progress_id': self.progress_id,
             'target_id': self.target_id,
+            'repo_digest': self.repo_digest,
             'target_version': self.target_version,
             'error': self.error,
             'paused': self.paused,
@@ -60,10 +63,20 @@ class CephadmUpgrade:
         else:
             self.upgrade_state = None
 
+    @property
+    def target_image(self) -> str:
+        assert self.upgrade_state
+        if not self.mgr.use_repo_digest:
+            return self.upgrade_state._target_name
+        if not self.upgrade_state.repo_digest:
+            return self.upgrade_state._target_name
+
+        return self.upgrade_state.repo_digest
+
     def upgrade_status(self) -> orchestrator.UpgradeStatusSpec:
         r = orchestrator.UpgradeStatusSpec()
         if self.upgrade_state:
-            r.target_image = self.upgrade_state.target_name
+            r.target_image = self.target_image
             r.in_progress = True
             if self.upgrade_state.error:
                 r.message = 'Error: ' + self.upgrade_state.error
@@ -90,15 +103,15 @@ class CephadmUpgrade:
         else:
             raise OrchestratorError('must specify either image or version')
         if self.upgrade_state:
-            if self.upgrade_state.target_name != target_name:
+            if self.upgrade_state._target_name != target_name:
                 raise OrchestratorError(
                     'Upgrade to %s (not %s) already in progress' %
-                    (self.upgrade_state.target_name, target_name))
+                    (self.upgrade_state._target_name, target_name))
             if self.upgrade_state.paused:
                 self.upgrade_state.paused = False
                 self._save_upgrade_state()
-                return 'Resumed upgrade to %s' % self.upgrade_state.target_name
-            return 'Upgrade to %s in progress' % self.upgrade_state.target_name
+                return 'Resumed upgrade to %s' % self.target_image
+            return 'Upgrade to %s in progress' % self.target_image
         self.upgrade_state = UpgradeState(
             target_name=target_name,
             progress_id=str(uuid.uuid4())
@@ -113,33 +126,33 @@ class CephadmUpgrade:
         if not self.upgrade_state:
             raise OrchestratorError('No upgrade in progress')
         if self.upgrade_state.paused:
-            return 'Upgrade to %s already paused' % self.upgrade_state.target_name
+            return 'Upgrade to %s already paused' % self.target_image
         self.upgrade_state.paused = True
         self._save_upgrade_state()
-        return 'Paused upgrade to %s' % self.upgrade_state.target_name
+        return 'Paused upgrade to %s' % self.target_image
 
     def upgrade_resume(self) -> str:
         if not self.upgrade_state:
             raise OrchestratorError('No upgrade in progress')
         if not self.upgrade_state.paused:
-            return 'Upgrade to %s not paused' % self.upgrade_state.target_name
+            return 'Upgrade to %s not paused' % self.target_image
         self.upgrade_state.paused = False
         self._save_upgrade_state()
         self.mgr.event.set()
-        return 'Resumed upgrade to %s' % self.upgrade_state.target_name
+        return 'Resumed upgrade to %s' % self.target_image
 
     def upgrade_stop(self) -> str:
         if not self.upgrade_state:
             return 'No upgrade in progress'
-        target_name = self.upgrade_state.target_name
         if self.upgrade_state.progress_id:
             self.mgr.remote('progress', 'complete',
                             self.upgrade_state.progress_id)
+        target_image = self.target_image
         self.upgrade_state = None
         self._save_upgrade_state()
         self._clear_upgrade_health_checks()
         self.mgr.event.set()
-        return 'Stopped upgrade to %s' % target_name
+        return 'Stopped upgrade to %s' % target_image
 
     def continue_upgrade(self) -> bool:
         """
@@ -196,7 +209,7 @@ class CephadmUpgrade:
             self.upgrade_state.progress_id = str(uuid.uuid4())
             self._save_upgrade_state()
         self.mgr.remote('progress', 'update', self.upgrade_state.progress_id,
-                        ev_msg='Upgrade to %s' % self.upgrade_state.target_name,
+                        ev_msg='Upgrade to %s' % self.target_image,
                         ev_progress=progress)
 
     def _save_upgrade_state(self) -> None:
@@ -224,14 +237,14 @@ class CephadmUpgrade:
             logger.debug('_do_upgrade no state, exiting')
             return
 
-        target_name = self.upgrade_state.target_name
+        target_image = self.target_image
         target_id = self.upgrade_state.target_id
-        if not target_id:
+        if not target_id or (self.mgr.use_repo_digest and not self.upgrade_state.repo_digest):
             # need to learn the container hash
-            logger.info('Upgrade: First pull of %s' % target_name)
+            logger.info('Upgrade: First pull of %s' % target_image)
             try:
                 target_id, target_version, repo_digest = self.mgr._get_container_image_info(
-                    target_name)
+                    target_image)
             except OrchestratorError as e:
                 self._fail_upgrade('UPGRADE_FAILED_PULL', {
                     'severity': 'warning',
@@ -242,9 +255,11 @@ class CephadmUpgrade:
                 return
             self.upgrade_state.target_id = target_id
             self.upgrade_state.target_version = target_version
+            self.upgrade_state.repo_digest = repo_digest
             self._save_upgrade_state()
+            target_image = self.target_image
         target_version = self.upgrade_state.target_version
-        logger.info('Upgrade: Target is %s with id %s' % (target_name,
+        logger.info('Upgrade: Target is %s with id %s' % (target_image,
                                                           target_id))
 
         image_settings = self.get_distinct_container_image_settings()
@@ -275,27 +290,27 @@ class CephadmUpgrade:
                 # make sure host has latest container image
                 out, err, code = self.mgr._run_cephadm(
                     d.hostname, '', 'inspect-image', [],
-                    image=target_name, no_fsid=True, error_ok=True)
+                    image=target_image, no_fsid=True, error_ok=True)
                 if code or json.loads(''.join(out)).get('image_id') != target_id:
-                    logger.info('Upgrade: Pulling %s on %s' % (target_name,
+                    logger.info('Upgrade: Pulling %s on %s' % (target_image,
                                                                d.hostname))
                     out, err, code = self.mgr._run_cephadm(
                         d.hostname, '', 'pull', [],
-                        image=target_name, no_fsid=True, error_ok=True)
+                        image=target_image, no_fsid=True, error_ok=True)
                     if code:
                         self._fail_upgrade('UPGRADE_FAILED_PULL', {
                             'severity': 'warning',
                             'summary': 'Upgrade: failed to pull target image',
                             'count': 1,
                             'detail': [
-                                'failed to pull %s on host %s' % (target_name,
+                                'failed to pull %s on host %s' % (target_image,
                                                                   d.hostname)],
                         })
                         return
                     r = json.loads(''.join(out))
                     if r.get('image_id') != target_id:
                         logger.info('Upgrade: image %s pull on %s got new image %s (not %s), restarting' % (
-                            target_name, d.hostname, r['image_id'], target_id))
+                            target_image, d.hostname, r['image_id'], target_id))
                         self.upgrade_state.target_id = r['image_id']
                         self._save_upgrade_state()
                         return
@@ -303,7 +318,7 @@ class CephadmUpgrade:
                 self._update_upgrade_progress(done / len(daemons))
 
                 if not d.container_image_id:
-                    if d.container_image_name == target_name:
+                    if d.container_image_name == target_image:
                         logger.debug(
                             'daemon %s has unknown container_image_id but has correct image name' % (d.name()))
                         continue
@@ -316,7 +331,7 @@ class CephadmUpgrade:
                     d.daemon_id,
                     d.hostname,
                     'redeploy',
-                    image=target_name
+                    image=target_image
                 )
                 return
 
@@ -353,10 +368,10 @@ class CephadmUpgrade:
                         (count, daemon_type, version, target_version))
 
             # push down configs
-            if image_settings.get(daemon_type) != target_name:
+            if image_settings.get(daemon_type) != target_image:
                 logger.info('Upgrade: Setting container_image for all %s...' %
                             daemon_type)
-                self.mgr.set_container_image(name_to_config_section(daemon_type), target_name)
+                self.mgr.set_container_image(name_to_config_section(daemon_type), target_image)
             to_clean = []
             for section in image_settings.keys():
                 if section.startswith(name_to_config_section(daemon_type) + '.'):
@@ -376,7 +391,7 @@ class CephadmUpgrade:
 
         # clean up
         logger.info('Upgrade: Finalizing container_image settings')
-        self.mgr.set_container_image('global', target_name)
+        self.mgr.set_container_image('global', target_image)
 
         for daemon_type in CEPH_UPGRADE_ORDER:
             ret, image, err = self.mgr.check_mon_command({
index 26354dc2cf57b798fb6fe8f476f66114b748b298..6e052b11f38bd5b5714d0c6dec60c46e89f50ef0 100644 (file)
@@ -165,6 +165,7 @@ if 'UNITTEST' in os.environ:
             self._unconfigure_logging = mock.MagicMock()
             self._ceph_log = mock.MagicMock()
             self._ceph_dispatch_remote = lambda *_: None
+            self._ceph_get_mgr_id = mock.MagicMock()
 
 
     cm = mock.Mock()