estimates. This feature is enabled by default for relevant commands including
scan_extents, scan_inodes, and other state-changing operations.
Related Tracker: https://tracker.ceph.com/issues/63191
+* RBD: Fixed incorrect behavior of the "start-time" argument for mirror
+ snapshot and trash purge schedules, where it previously offset the schedule
+ anchor instead of defining it. The argument now requires an ISO 8601
+ date-time. The `schedule ls` output displays the start time in UTC, including
+ the date and time in the format "%Y-%m-%d %H:%M:00". The `schedule status`
+ output now displays the next schedule time in UTC.
>=20.0.0
To create a mirror snapshot schedule for an image::
- rbd mirror snapshot schedule add --pool mypool --image myimage 12h 14:00:00-05:00
+ rbd mirror snapshot schedule add --pool mypool --image myimage 12h 2020-01-14T11:30+05:30
Availability
============
rbd mirror snapshot schedule add [--pool {pool-name}] [--image {image-name}] {interval} [{start-time}]
The ``interval`` can be specified in days, hours, or minutes using ``d``, ``h``,
-``m`` suffix respectively. The optional ``start-time`` can be specified using
-the ISO 8601 time format. For example::
+``m`` suffixes, respectively. The optional ``start-time`` must be specified in
+the ISO 8601 time format. If no UTC offset is provided, UTC is assumed. For
+example::
- $ rbd --cluster site-a mirror snapshot schedule add --pool image-pool 24h 14:00:00-05:00
+ $ rbd --cluster site-a mirror snapshot schedule add --pool image-pool 24h 2020-01-14T11:30+05:30
$ rbd --cluster site-a mirror snapshot schedule add --pool image-pool --image image1 6h
To remove a mirror-snapshot schedule with ``rbd``, specify the
To list all snapshot schedules for a specific level (global, pool, or image)
with ``rbd``, specify the ``mirror snapshot schedule ls`` command along with
an optional pool or image name. Additionally, the ``--recursive`` option can
-be specified to list all schedules at the specified level and below. For
-example::
+be specified to list all schedules at the specified level and below.
+
+Schedule start times are always displayed in UTC. For example::
$ rbd --cluster site-a mirror snapshot schedule ls --pool image-pool --recursive
POOL NAMESPACE IMAGE SCHEDULE
- image-pool - - every 1d starting at 14:00:00-05:00
+ image-pool - - every 1d starting at 2020-01-14 06:00:00
image-pool image1 every 6h
To view the status for when the next snapshots will be created for
rbd mirror snapshot schedule status [--pool {pool-name}] [--image {image-name}]
-For example::
+The next schedule time is always displayed in UTC. For example::
$ rbd --cluster site-a mirror snapshot schedule status
SCHEDULE TIME IMAGE
- 2020-02-26 18:00:00 image-pool/image1
+ 2026-01-24 06:00:00 image-pool/image1
+
Disable Image Mirroring
-----------------------
expect_fail rbd trash purge schedule remove -p rbd dummy
expect_fail rbd trash purge schedule remove -p rbd 1d dummy
- rbd trash purge schedule add -p rbd 1d 01:30
+ rbd trash purge schedule add -p rbd 1h 2100-01-01T19:00Z
+ test "$(rbd trash purge schedule ls -p rbd)" = 'every 1h starting at 2100-01-01 19:00:00'
+ for i in `seq 12`; do
+ rbd trash purge schedule status -p rbd | grep '2100-01-01 19:00:00' && break
+ sleep 10
+ done
+ test "$(rbd trash purge schedule status -p rbd --format xml |
+ xmlstarlet sel -t -v '//scheduled/item/schedule_time')" = '2100-01-01 19:00:00'
+ rbd trash purge schedule rm -p rbd
- rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule add -p rbd 1d 2020-01-14T07:00+05:30
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 2020-01-14 01:30:00'
expect_fail rbd trash purge schedule ls
- rbd trash purge schedule ls -R | grep 'every 1d starting at 01:30'
- rbd trash purge schedule ls -R -p rbd | grep 'every 1d starting at 01:30'
+ rbd trash purge schedule ls -R | grep 'every 1d starting at 2020-01-14 01:30:00'
+ rbd trash purge schedule ls -R -p rbd | grep 'every 1d starting at 2020-01-14 01:30:00'
expect_fail rbd trash purge schedule ls -p rbd2
test "$(rbd trash purge schedule ls -p rbd2 -R --format json)" = "[]"
test "$(rbd trash purge schedule status -p rbd --format xml |
xmlstarlet sel -t -v '//scheduled/item/pool')" = 'rbd'
- rbd trash purge schedule add 2d 00:17
- rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
- rbd trash purge schedule ls -R | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule add 2d 2020-01-14T05:47+05:30
+ rbd trash purge schedule ls | grep 'every 2d starting at 2020-01-14 00:17:00'
+ rbd trash purge schedule ls -R | grep 'every 2d starting at 2020-01-14 00:17:00'
expect_fail rbd trash purge schedule ls -p rbd2
- rbd trash purge schedule ls -p rbd2 -R | grep 'every 2d starting at 00:17'
- rbd trash purge schedule ls -p rbd2/ns1 -R | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls -p rbd2 -R | grep 'every 2d starting at 2020-01-14 00:17:00'
+ rbd trash purge schedule ls -p rbd2/ns1 -R | grep 'every 2d starting at 2020-01-14 00:17:00'
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
xmlstarlet sel -t -v '//schedules/schedule/pool')" = "-"
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
xmlstarlet sel -t -v '//schedules/schedule/namespace')" = "-"
test "$(rbd trash purge schedule ls -R -p rbd2/ns1 --format xml |
- xmlstarlet sel -t -v '//schedules/schedule/items/item/start_time')" = "00:17:00"
+ xmlstarlet sel -t -v '//schedules/schedule/items/item/start_time')" = "2020-01-14 00:17:00"
for i in `seq 12`; do
rbd trash purge schedule status --format xml |
xmlstarlet sel -t -v '//scheduled/item/pool'))" = 'rbd2 rbd2'
test "$(echo $(rbd trash purge schedule ls -R --format xml |
- xmlstarlet sel -t -v '//schedules/schedule/items'))" = "2d00:17:00 1d01:30:00"
+ xmlstarlet sel -t -v '//schedules/schedule/items/item'))" = "2d2020-01-14 00:17:00 1d2020-01-14 01:30:00"
rbd trash purge schedule add 1d
- rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
+ rbd trash purge schedule ls | grep 'every 2d starting at 2020-01-14 00:17:00'
rbd trash purge schedule ls | grep 'every 1d'
rbd trash purge schedule ls -R --format xml |
- xmlstarlet sel -t -v '//schedules/schedule/items' | grep '2d00:17'
+ xmlstarlet sel -t -v '//schedules/schedule/items' | grep '2d2020-01-14 00:17:00'
rbd trash purge schedule rm 1d
- rbd trash purge schedule ls | grep 'every 2d starting at 00:17'
- rbd trash purge schedule rm 2d 00:17
+ rbd trash purge schedule ls | grep 'every 2d starting at 2020-01-14 00:17:00'
+ rbd trash purge schedule rm 2d 2020-01-14T00:17:00
expect_fail rbd trash purge schedule ls
for p in rbd2 rbd2/ns1; do
expect_fail rbd trash purge schedule remove -p rbd 1d dummy
expect_fail rbd trash purge schedule remove dummy
expect_fail rbd trash purge schedule remove 1d dummy
- rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 01:30'
+ expect_fail rbd trash purge schedule add -p rbd 30m 00:15
+ expect_fail rbd trash purge schedule add -p rbd 30m 00:15+05:30
+ expect_fail rbd trash purge schedule add -p rbd 30m 2020-13-14T00:15+05:30
+ expect_fail rbd trash purge schedule add -p rbd 30m 2020-01-32T00:15+05:30
+ expect_fail rbd trash purge schedule add -p rbd 30m 2020-01-14T25:15+05:30
+ expect_fail rbd trash purge schedule add -p rbd 30m 2020-01-14T00:60+05:30
+ expect_fail rbd trash purge schedule add -p rbd 30m 2020-01-14T00:15+24:00
+
+ rbd trash purge schedule ls -p rbd | grep 'every 1d starting at 2020-01-14 01:30:00'
rbd trash purge schedule ls | grep 'every 2m'
- rbd trash purge schedule remove -p rbd 1d 01:30
+ rbd trash purge schedule remove -p rbd 1d 2020-01-14T01:30
rbd trash purge schedule remove 2m
test "$(rbd trash purge schedule ls -R --format json)" = "[]"
expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 dummy
expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 1h dummy
+ rbd mirror snapshot schedule add -p rbd2/ns1 1h 2100-01-01T19:00Z
+ test "$(rbd mirror snapshot schedule ls -p rbd2/ns1)" = 'every 1h starting at 2100-01-01 19:00:00'
+ for i in `seq 12`; do
+ rbd mirror snapshot schedule status -p rbd2/ns1 | grep '2100-01-01 19:00:00' && break
+ sleep 10
+ done
+ test "$(rbd mirror snapshot schedule status -p rbd2/ns1 --format xml |
+ xmlstarlet sel -t -v '//scheduled_images/image/schedule_time')" = '2100-01-01 19:00:00'
+ rbd mirror snapshot schedule rm -p rbd2/ns1
+
rbd mirror snapshot schedule add -p rbd2/ns1 --image test1 1m
expect_fail rbd mirror snapshot schedule ls
rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
done
rbd mirror snapshot schedule status | grep 'rbd2/ns1/test1'
- rbd mirror snapshot schedule add 1h 00:15
- test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
- rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule add 1h 2020-01-14T04:30+05:30
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 2020-01-13 23:00:00'
+ rbd mirror snapshot schedule ls -R | grep 'every 1h starting at 2020-01-13 23:00:00'
rbd mirror snapshot schedule ls -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2
- rbd mirror snapshot schedule ls -p rbd2 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2 -R | grep 'every 1h starting at 2020-01-13 23:00:00'
rbd mirror snapshot schedule ls -p rbd2 -R | grep 'rbd2 *ns1 *test1 *every 1m'
expect_fail rbd mirror snapshot schedule ls -p rbd2/ns1
- rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'every 1h starting at 00:15:00'
+ rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'every 1h starting at 2020-01-13 23:00:00'
rbd mirror snapshot schedule ls -p rbd2/ns1 -R | grep 'rbd2 *ns1 *test1 *every 1m'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
expect_fail rbd mirror snapshot schedule remove 1h dummy
expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 dummy
expect_fail rbd mirror snapshot schedule remove -p rbd2/ns1 --image test1 1h dummy
- test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 00:15:00'
+ expect_fail rbd mirror snapshot schedule add 30m 04:30
+ expect_fail rbd mirror snapshot schedule add 30m 04:30+05:30
+ expect_fail rbd mirror snapshot schedule add 30m 2020-13-14T04:30+05:30
+ expect_fail rbd mirror snapshot schedule add 30m 2020-01-32T04:30+05:30
+ expect_fail rbd mirror snapshot schedule add 30m 2020-01-14T25:30+05:30
+ expect_fail rbd mirror snapshot schedule add 30m 2020-01-14T04:60+05:30
+ expect_fail rbd mirror snapshot schedule add 30m 2020-01-14T04:30+24:00
+ test "$(rbd mirror snapshot schedule ls)" = 'every 1h starting at 2020-01-13 23:00:00'
test "$(rbd mirror snapshot schedule ls -p rbd2/ns1 --image test1)" = 'every 1m'
rbd rm rbd2/ns1/test1
import rbd
import traceback
-from datetime import datetime
+from datetime import datetime, timezone
from threading import Condition, Lock, Thread
from typing import Any, Dict, List, NamedTuple, Optional, Set, Tuple, Union
self.condition = Condition(self.lock)
self.module = module
self.log = module.log
- self.last_refresh_images = datetime(1970, 1, 1)
+ self.last_refresh_images = datetime(1970, 1, 1, tzinfo=timezone.utc)
self.create_snapshot_requests = CreateSnapshotRequests(self)
self.stop_thread = False
pool_id, namespace, image_id = image_spec
self.create_snapshot_requests.add(pool_id, namespace, image_id)
with self.lock:
- self.enqueue(datetime.now(), pool_id, namespace, image_id)
+ self.enqueue(datetime.now(timezone.utc), pool_id, namespace, image_id)
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
self.log.exception("MirrorSnapshotScheduleHandler: client blocklisted")
def init_schedule_queue(self) -> None:
# schedule_time => image_spec
- self.queue: Dict[str, List[ImageSpec]] = {}
+ self.queue: Dict[datetime, List[ImageSpec]] = {}
# pool_id => {namespace => image_id}
self.images: Dict[str, Dict[str, Dict[str, str]]] = {}
self.schedules = Schedules(self)
self.schedules.load(namespace_validator, image_validator)
def refresh_images(self) -> float:
- elapsed = (datetime.now() - self.last_refresh_images).total_seconds()
+ elapsed = (datetime.now(timezone.utc) - self.last_refresh_images).total_seconds()
if elapsed < self.REFRESH_DELAY_SECONDS:
return self.REFRESH_DELAY_SECONDS - elapsed
self.log.debug("MirrorSnapshotScheduleHandler: no schedules")
self.images = {}
self.queue = {}
- self.last_refresh_images = datetime.now()
+ self.last_refresh_images = datetime.now(timezone.utc)
return self.REFRESH_DELAY_SECONDS
images: Dict[str, Dict[str, Dict[str, str]]] = {}
self.refresh_queue(images)
self.images = images
- self.last_refresh_images = datetime.now()
+ self.last_refresh_images = datetime.now(timezone.utc)
return self.REFRESH_DELAY_SECONDS
def load_pool_images(self,
pool_name, e))
def rebuild_queue(self) -> None:
- now = datetime.now()
-
# don't remove from queue "due" images
- now_string = datetime.strftime(now, "%Y-%m-%d %H:%M:00")
-
+ now = datetime.now(timezone.utc)
for schedule_time in list(self.queue):
- if schedule_time > now_string:
+ if schedule_time > now:
del self.queue[schedule_time]
if not self.schedules:
def refresh_queue(self,
current_images: Dict[str, Dict[str, Dict[str, str]]]) -> None:
- now = datetime.now()
+ now = datetime.now(timezone.utc)
for pool_id in self.images:
for namespace in self.images[pool_id]:
if not self.queue:
return None, 1000.0
- now = datetime.now()
- schedule_time = sorted(self.queue)[0]
+ now = datetime.now(timezone.utc)
+ schedule_time = min(self.queue)
- if datetime.strftime(now, "%Y-%m-%d %H:%M:%S") < schedule_time:
- wait_time = (datetime.strptime(schedule_time,
- "%Y-%m-%d %H:%M:%S") - now)
- return None, wait_time.total_seconds()
+ if now < schedule_time:
+ return None, (schedule_time - now).total_seconds()
images = self.queue[schedule_time]
image = images.pop(0)
continue
image_name = self.images[pool_id][namespace][image_id]
scheduled_images.append({
- 'schedule_time': schedule_time,
+ 'schedule_time': schedule_time.strftime("%Y-%m-%d %H:%M:00"),
'image': image_name
})
return 0, json.dumps({'scheduled_images': scheduled_images},
-import datetime
import json
import rados
import rbd
import re
-from dateutil.parser import parse
-from typing import cast, Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING
+from datetime import date, datetime, timezone, timedelta
+from dateutil.parser import parse, isoparse
+from typing import Any, Callable, Dict, List, Optional, Set, Tuple, TYPE_CHECKING
from .common import get_rbd_pools
if TYPE_CHECKING:
class StartTime:
- def __init__(self,
- hour: int,
- minute: int,
- tzinfo: Optional[datetime.tzinfo]) -> None:
- self.time = datetime.time(hour, minute, tzinfo=tzinfo)
- self.minutes = self.time.hour * 60 + self.time.minute
- if self.time.tzinfo:
- utcoffset = cast(datetime.timedelta, self.time.utcoffset())
- self.minutes += int(utcoffset.seconds / 60)
-
- def __eq__(self, start_time: Any) -> bool:
- return self.minutes == start_time.minutes
+ def __init__(self, dt: datetime) -> None:
+ self.dt = self._to_utc(dt)
+
+ @staticmethod
+ def _to_utc(dt: datetime) -> datetime:
+ if dt.tzinfo is None:
+ return dt.replace(tzinfo=timezone.utc, second=0, microsecond=0)
+ return dt.astimezone(timezone.utc).replace(second=0, microsecond=0)
+
+ def __eq__(self, other: Any) -> bool:
+ return self.dt == other.dt
def __hash__(self) -> int:
- return hash(self.minutes)
+ return hash(self.dt)
def to_string(self) -> str:
- return self.time.isoformat()
+ return self.dt.strftime("%Y-%m-%d %H:%M:00")
@classmethod
- def from_string(cls, start_time: Optional[str]) -> Optional['StartTime']:
+ def from_string(cls,
+ start_time: Optional[str],
+ allow_legacy: bool = False) -> Optional['StartTime']:
if not start_time:
return None
try:
- t = parse(start_time).timetz()
+ dt = isoparse(start_time)
except ValueError as e:
- raise ValueError("Invalid start time {}: {}".format(start_time, e))
+ if not allow_legacy:
+ raise ValueError("Invalid start time {}: {}".format(start_time, e))
+
+ try:
+ t = parse(start_time).timetz()
+ except ValueError as e:
+ raise ValueError("Invalid legacy start time {}: {}".format(start_time, e))
+
+ dt = datetime.combine(date(1970, 1, 1), t)
- return StartTime(t.hour, t.minute, tzinfo=t.tzinfo)
+ return cls(dt)
class Schedule:
start_time: Optional[StartTime] = None) -> None:
self.items.discard((interval, start_time))
- def next_run(self, now: datetime.datetime) -> str:
+ def next_run(self, now: datetime) -> datetime:
schedule_time = None
- for interval, opt_start in self.items:
- period = datetime.timedelta(minutes=interval.minutes)
- start_time = datetime.datetime(1970, 1, 1)
- if opt_start:
- start = cast(StartTime, opt_start)
- start_time += datetime.timedelta(minutes=start.minutes)
- time = start_time + \
- (int((now - start_time) / period) + 1) * period
- if schedule_time is None or time < schedule_time:
- schedule_time = time
+
+ for interval, start_time in self.items:
+ period = timedelta(minutes=interval.minutes)
+ anchor_time = start_time.dt if start_time else datetime(1970, 1, 1, tzinfo=timezone.utc)
+
+ if anchor_time > now:
+ candidate_time = anchor_time
+ else:
+ q, r = divmod(now - anchor_time, period)
+ candidate_time = anchor_time + (q + bool(r)) * period
+
+ if schedule_time is None or candidate_time < schedule_time:
+ schedule_time = candidate_time
+
if schedule_time is None:
raise ValueError('no items is added')
- return datetime.datetime.strftime(schedule_time, "%Y-%m-%d %H:%M:00")
+
+ return schedule_time
def to_list(self) -> List[Dict[str, Optional[str]]]:
- def item_to_dict(interval: Interval,
- start_time: Optional[StartTime]) -> Dict[str, Optional[str]]:
- if start_time:
- schedule_start_time: Optional[str] = start_time.to_string()
- else:
- schedule_start_time = None
- return {SCHEDULE_INTERVAL: interval.to_string(),
- SCHEDULE_START_TIME: schedule_start_time}
- return [item_to_dict(interval, start_time)
- for interval, start_time in self.items]
+ return [
+ {
+ SCHEDULE_INTERVAL: interval.to_string(),
+ SCHEDULE_START_TIME: start_time.to_string() if start_time else None
+ }
+ for interval, start_time in self.items
+ ]
def to_json(self) -> str:
return json.dumps(self.to_list(), indent=4, sort_keys=True)
schedule = Schedule(name)
for item in items:
interval = Interval.from_string(item[SCHEDULE_INTERVAL])
- start_time = item[SCHEDULE_START_TIME] and \
- StartTime.from_string(item[SCHEDULE_START_TIME]) or None
+ # Allow loading 'start_time' values in legacy format for backwards compatibility
+ start_time = StartTime.from_string(
+ item.get(SCHEDULE_START_TIME), allow_legacy=True)
schedule.add(interval, start_time)
return schedule
except json.JSONDecodeError as e:
import rbd
import traceback
-from datetime import datetime
+from datetime import datetime, timezone
from threading import Condition, Lock, Thread
from typing import Any, Dict, List, Optional, Tuple
self.condition = Condition(self.lock)
self.module = module
self.log = module.log
- self.last_refresh_pools = datetime(1970, 1, 1)
+ self.last_refresh_pools = datetime(1970, 1, 1, tzinfo=timezone.utc)
self.stop_thread = False
self.thread = Thread(target=self.run)
pool_id, namespace = ns_spec
self.trash_purge(pool_id, namespace)
with self.lock:
- self.enqueue(datetime.now(), pool_id, namespace)
+ self.enqueue(datetime.now(timezone.utc), pool_id, namespace)
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
self.log.exception("TrashPurgeScheduleHandler: client blocklisted")
try:
with self.module.rados.open_ioctx2(int(pool_id)) as ioctx:
ioctx.set_namespace(namespace)
- rbd.RBD().trash_purge(ioctx, datetime.now())
+ rbd.RBD().trash_purge(ioctx, datetime.now(timezone.utc))
except (rados.ConnectionShutdown, rbd.ConnectionShutdown):
raise
except Exception as e:
pool_id, namespace, e))
def init_schedule_queue(self) -> None:
- self.queue: Dict[str, List[Tuple[str, str]]] = {}
+ self.queue: Dict[datetime, List[Tuple[str, str]]] = {}
# pool_id => {namespace => pool_name}
self.pools: Dict[str, Dict[str, str]] = {}
self.schedules = Schedules(self)
self.schedules.load()
def refresh_pools(self) -> float:
- elapsed = (datetime.now() - self.last_refresh_pools).total_seconds()
+ elapsed = (datetime.now(timezone.utc) - self.last_refresh_pools).total_seconds()
if elapsed < self.REFRESH_DELAY_SECONDS:
return self.REFRESH_DELAY_SECONDS - elapsed
self.log.debug("TrashPurgeScheduleHandler: no schedules")
self.pools = {}
self.queue = {}
- self.last_refresh_pools = datetime.now()
+ self.last_refresh_pools = datetime.now(timezone.utc)
return self.REFRESH_DELAY_SECONDS
pools: Dict[str, Dict[str, str]] = {}
self.refresh_queue(pools)
self.pools = pools
- self.last_refresh_pools = datetime.now()
+ self.last_refresh_pools = datetime.now(timezone.utc)
return self.REFRESH_DELAY_SECONDS
def load_pool(self, ioctx: rados.Ioctx, pools: Dict[str, Dict[str, str]]) -> None:
pools[pool_id][namespace] = pool_name
def rebuild_queue(self) -> None:
- now = datetime.now()
-
# don't remove from queue "due" images
- now_string = datetime.strftime(now, "%Y-%m-%d %H:%M:00")
-
+ now = datetime.now(timezone.utc)
for schedule_time in list(self.queue):
- if schedule_time > now_string:
+ if schedule_time > now:
del self.queue[schedule_time]
if not self.schedules:
self.condition.notify()
def refresh_queue(self, current_pools: Dict[str, Dict[str, str]]) -> None:
- now = datetime.now()
+ now = datetime.now(timezone.utc)
for pool_id, namespaces in self.pools.items():
for namespace in namespaces:
if not self.queue:
return None, 1000.0
- now = datetime.now()
- schedule_time = sorted(self.queue)[0]
+ now = datetime.now(timezone.utc)
+ schedule_time = min(self.queue)
- if datetime.strftime(now, "%Y-%m-%d %H:%M:%S") < schedule_time:
- wait_time = (datetime.strptime(schedule_time,
- "%Y-%m-%d %H:%M:%S") - now)
- return None, wait_time.total_seconds()
+ if now < schedule_time:
+ return None, (schedule_time - now).total_seconds()
namespaces = self.queue[schedule_time]
namespace = namespaces.pop(0)
continue
pool_name = self.pools[pool_id][namespace]
scheduled.append({
- 'schedule_time': schedule_time,
+ 'schedule_time': schedule_time.strftime("%Y-%m-%d %H:%M:00"),
'pool_id': pool_id,
'pool_name': pool_name,
'namespace': namespace