mgr/cephadm: update timestamp on repeat daemon/service events (56080/head)
author    Adam King <adking@redhat.com>
          Wed, 18 Oct 2023 18:00:05 +0000 (14:00 -0400)
committer Adam King <adking@redhat.com>
          Wed, 13 Mar 2024 14:27:56 +0000 (10:27 -0400)
If you have a daemon/service event and then an identical
event happens later (e.g. the same daemon is redeployed
multiple times), the event is not updated for the repeat
instances. In cases like this I think it makes more
sense to update the timestamp so users can see the most
recent time the event happened.

Fixes: https://tracker.ceph.com/issues/63238
Signed-off-by: Adam King <adking@redhat.com>
(cherry picked from commit 13512cc202c90abd6c5f1e2747d121cc07689d1b)
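For context, a minimal runnable sketch of the deduplication path the first hunk
below touches. Only self.events, kind_subject(), message, and created are
visible in the diff; the method name add, the OrchestratorEvent constructor
shape, and the '<kind>:<subject>' key format are assumptions for illustration
(the real orchestrator.OrchestratorEvent carries more fields, e.g. a level).

from collections import defaultdict
from datetime import datetime
from typing import Dict, List

class OrchestratorEvent:
    # stand-in for orchestrator.OrchestratorEvent; only the attributes the
    # hunk uses are modeled here
    def __init__(self, created: datetime, kind: str, subject: str, message: str) -> None:
        self.created = created   # timestamp, now refreshed on duplicates
        self.kind = kind         # e.g. 'daemon' or 'service'
        self.subject = subject   # e.g. 'rgw.myrgw.foobar'
        self.message = message

    def kind_subject(self) -> str:
        return f'{self.kind}:{self.subject}'

class EventStore:
    def __init__(self) -> None:
        # events keyed by '<kind>:<subject>'
        self.events: Dict[str, List[OrchestratorEvent]] = defaultdict(list)

    def add(self, event: OrchestratorEvent) -> None:
        for e in self.events[event.kind_subject()]:
            if e.message == event.message:
                # if subject and message match, just update the timestamp
                e.created = event.created
                return
        self.events[event.kind_subject()].append(event)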

src/pybind/mgr/cephadm/inventory.py
src/pybind/mgr/cephadm/tests/test_cephadm.py

diff --git a/src/pybind/mgr/cephadm/inventory.py b/src/pybind/mgr/cephadm/inventory.py
index f83e687031c87a20d78fe92d121cbe06d291250c..03a3f1218567cbda39534212606888224b8316a9 100644
--- a/src/pybind/mgr/cephadm/inventory.py
+++ b/src/pybind/mgr/cephadm/inventory.py
@@ -1469,6 +1469,8 @@ class EventStore():
 
         for e in self.events[event.kind_subject()]:
             if e.message == event.message:
+                # if subject and message match, just update the timestamp
+                e.created = event.created
                 return
 
         self.events[event.kind_subject()].append(event)
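
Under the sketch above, the observable effect of this change is the following
(timestamps and the event message are hypothetical values):

store = EventStore()
t1 = datetime(2023, 10, 18, 22, 45)
t2 = datetime(2023, 10, 18, 23, 46)
msg = "Deployed rgw.myrgw.foobar on host 'test'"

store.add(OrchestratorEvent(t1, 'daemon', 'rgw.myrgw.foobar', msg))
store.add(OrchestratorEvent(t2, 'daemon', 'rgw.myrgw.foobar', msg))

evs = store.events['daemon:rgw.myrgw.foobar']
assert len(evs) == 1            # the repeat event was not appended...
assert evs[0].created == t2     # ...but its timestamp was refreshed

The test added in the second hunk exercises exactly this: two redeploys of the
same daemon with pinned clocks, asserting one event whose timestamp moved.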
diff --git a/src/pybind/mgr/cephadm/tests/test_cephadm.py b/src/pybind/mgr/cephadm/tests/test_cephadm.py
index 8e09eb9400116679093bbc5b7eb10c05ec756587..a9795c8980aa35e4c819da90cd818d6fd9bd12ef 100644
--- a/src/pybind/mgr/cephadm/tests/test_cephadm.py
+++ b/src/pybind/mgr/cephadm/tests/test_cephadm.py
@@ -21,7 +21,7 @@ from ceph.deployment.service_spec import ServiceSpec, PlacementSpec, RGWSpec, \
     CustomConfig, PrometheusSpec
 from ceph.deployment.drive_selection.selector import DriveSelection
 from ceph.deployment.inventory import Devices, Device
-from ceph.utils import datetime_to_str, datetime_now
+from ceph.utils import datetime_to_str, datetime_now, str_to_datetime
 from orchestrator import DaemonDescription, InventoryHost, \
     HostSpec, OrchestratorError, DaemonDescriptionStatus, OrchestratorEvent
 from tests import mock
@@ -394,6 +394,42 @@ class TestCephadm(object):
 
                     assert 'myerror' in ''.join(evs)
 
+    @mock.patch("cephadm.serve.CephadmServe._run_cephadm", _run_cephadm('[]'))
+    def test_daemon_action_event_timestamp_update(self, cephadm_module: CephadmOrchestrator):
+        # Test to make sure if a new daemon event is created with the same subject
+        # and message that the timestamp of the event is updated to let users know
+        # when it most recently occurred.
+        cephadm_module.service_cache_timeout = 10
+        with with_host(cephadm_module, 'test'):
+            with with_service(cephadm_module, RGWSpec(service_id='myrgw.foobar', unmanaged=True)) as _, \
+                    with_daemon(cephadm_module, RGWSpec(service_id='myrgw.foobar'), 'test') as daemon_id:
+
+                d_name = 'rgw.' + daemon_id
+
+                now = str_to_datetime('2023-10-18T22:45:29.119250Z')
+                with mock.patch("cephadm.inventory.datetime_now", lambda: now):
+                    c = cephadm_module.daemon_action('redeploy', d_name)
+                    assert wait(cephadm_module,
+                                c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+                    CephadmServe(cephadm_module)._check_daemons()
+
+                d_events = cephadm_module.events.get_for_daemon(d_name)
+                assert len(d_events) == 1
+                assert d_events[0].created == now
+
+                later = str_to_datetime('2023-10-18T23:46:37.119250Z')
+                with mock.patch("cephadm.inventory.datetime_now", lambda: later):
+                    c = cephadm_module.daemon_action('redeploy', d_name)
+                    assert wait(cephadm_module,
+                                c) == f"Scheduled to redeploy rgw.{daemon_id} on host 'test'"
+
+                    CephadmServe(cephadm_module)._check_daemons()
+
+                d_events = cephadm_module.events.get_for_daemon(d_name)
+                assert len(d_events) == 1
+                assert d_events[0].created == later
+
     @pytest.mark.parametrize(
         "action",
         [