snap_schedule: add metadata fields, refactor Schedule, multiple Schedules per path
author     Jan Fajerski <jfajerski@suse.com>    Fri, 3 Apr 2020 14:04:11 +0000 (16:04 +0200)
committer  Jan Fajerski <jfajerski@suse.com>    Thu, 27 Aug 2020 13:55:46 +0000 (15:55 +0200)
This commit adds several metadata fields (created, first, last, last_pruned,
created_count, pruned_count) to the DB and refactors the Schedule class so
that it concentrates all DB queries in one place. It also enables setting
multiple schedules per path, as long as the (repeat interval, start time)
pairs are unique.
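
To illustrate, a minimal standalone sketch of the multi-schedule behaviour against
a throwaway in-memory DB; the DDL is a trimmed copy of Schedule.CREATE_TABLES, and
the path and timestamps are made-up values:

import sqlite3

# trimmed copy of Schedule.CREATE_TABLES, only the columns used below
ddl = '''
CREATE TABLE schedules(
    id INTEGER PRIMARY KEY ASC,
    path TEXT NOT NULL UNIQUE,
    rel_path TEXT NOT NULL,
    active INT NOT NULL);
CREATE TABLE schedules_meta(
    id INTEGER PRIMARY KEY ASC,
    schedule_id INT,
    start TEXT NOT NULL,
    repeat BIGINT NOT NULL,
    schedule TEXT NOT NULL,
    FOREIGN KEY(schedule_id) REFERENCES schedules(id) ON DELETE CASCADE,
    UNIQUE (start, repeat));
'''

con = sqlite3.connect(':memory:')
con.executescript(ddl)
con.execute('INSERT INTO schedules(path, rel_path, active) VALUES(?, ?, ?)',
            ('/foo', '/foo', 1))
sched_id = con.execute('SELECT id FROM schedules WHERE path = ?',
                       ('/foo',)).fetchone()[0]

meta_insert = ('INSERT INTO schedules_meta(schedule_id, start, repeat, schedule) '
               'VALUES(?, ?, ?, ?)')
start = '2020-04-03T00:00:00+00:00'
# same path, two distinct (start, repeat) pairs: both rows are accepted
con.execute(meta_insert, (sched_id, start, 3600, '1h'))
con.execute(meta_insert, (sched_id, start, 86400, '1d'))
# a duplicate (start, repeat) pair violates UNIQUE (start, repeat) and raises
# sqlite3.IntegrityError, which the mgr module reports back as an error
try:
    con.execute(meta_insert, (sched_id, start, 3600, '1h'))
except sqlite3.IntegrityError as e:
    print(f'duplicate schedule rejected: {e}')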

Signed-off-by: Jan Fajerski <jfajerski@suse.com>
src/pybind/mgr/snap_schedule/fs/schedule.py
src/pybind/mgr/snap_schedule/module.py

index bae2cd7d1476a585c58f0e27b97cec929988d3e3..50cb0d5e4026d3e1cae93fda503f5ff9104c57e6 100644 (file)
@@ -11,16 +11,18 @@ import re
 from mgr_util import CephfsClient, CephfsConnectionException, \
         open_filesystem
 from collections import OrderedDict
-from datetime import datetime
+from datetime import datetime, timezone
 import logging
-from operator import attrgetter, itemgetter
 from threading import Timer
 import sqlite3
 
 
 SNAP_SCHEDULE_NAMESPACE = 'cephfs-snap-schedule'
-SNAP_DB_OBJECT_NAME = 'snap_db'
-TS_FORMAT = '%Y-%m-%d-%H_%M_%S'
+SNAP_DB_PREFIX = 'snap_db'
+# increment this every time the db schema changes and provide upgrade code
+SNAP_DB_VERSION = '0'
+SNAP_DB_OBJECT_NAME = f'{SNAP_DB_PREFIX}_v{SNAP_DB_VERSION}'
+SNAPSHOT_TS_FORMAT = '%Y-%m-%d-%H_%M_%S'
 
 log = logging.getLogger(__name__)
 
@@ -42,15 +44,18 @@ def open_ioctx(self, pool):
 
 
 def updates_schedule_db(func):
-    def f(self, fs, *args):
-        func(self, fs, *args)
-        self.refresh_snap_timers(fs)
+    def f(self, fs, schedule_or_path, *args):
+        func(self, fs, schedule_or_path, *args)
+        path = schedule_or_path
+        if isinstance(schedule_or_path, Schedule):
+            path = schedule_or_path.path
+        self.refresh_snap_timers(fs, path)
     return f
 
 
 class Schedule(object):
     '''
-    Wrapper to work with schedules stored in Rados objects
+    Wrapper to work with schedules stored in sqlite
     '''
     def __init__(self,
                  path,
@@ -60,6 +65,12 @@ class Schedule(object):
                  fs_name,
                  subvol,
                  rel_path,
+                 created=None,
+                 first=None,
+                 last=None,
+                 last_pruned=None,
+                 created_count=0,
+                 pruned_count=0,
                  ):
         self.fs = fs_name
         self.subvol = subvol
@@ -67,24 +78,217 @@ class Schedule(object):
         self.rel_path = rel_path
         self.schedule = schedule
         self.retention = retention_policy
-        self._ret = {}
-        self.first_run = start
-        self.last_run = None
+        if start is None:
+            now = datetime.now(timezone.utc)
+            self.start = datetime(now.year,
+                                  now.month,
+                                  now.day,
+                                  tzinfo=now.tzinfo)
+        else:
+            self.start = datetime.fromisoformat(start).astimezone(timezone.utc)
+        if created is None:
+            self.created = datetime.now(timezone.utc)
+        else:
+            self.created = datetime.fromisoformat(created)
+        if first:
+            self.first = datetime.fromisoformat(first)
+        else:
+            self.first = first
+        if last:
+            self.last = datetime.fromisoformat(last)
+        else:
+            self.last = last
+        if last_pruned:
+            self.last_pruned = datetime.fromisoformat(last_pruned)
+        else:
+            self.last_pruned = last_pruned
+        self.created_count = created_count
+        self.pruned_count = pruned_count
+
+    @classmethod
+    def _from_get_query(cls, table_row, fs):
+        return cls(table_row['path'],
+                   table_row['schedule'],
+                   table_row['retention'],
+                   table_row['start'],
+                   fs,
+                   table_row['subvol'],
+                   table_row['rel_path'],
+                   table_row['created'],
+                   table_row['first'],
+                   table_row['last'],
+                   table_row['last_pruned'],
+                   table_row['created_count'],
+                   table_row['pruned_count'])
 
     def __str__(self):
-        return f'''{self.rel_path}: {self.schedule}; {self.retention}'''
+        return f'''{self.path}: {self.schedule}; {self.retention}'''
+
+    CREATE_TABLES = '''CREATE TABLE schedules(
+        id integer PRIMARY KEY ASC,
+        path text NOT NULL UNIQUE,
+        subvol text,
+        rel_path text NOT NULL,
+        active int NOT NULL
+    );
+    CREATE TABLE schedules_meta(
+        id INTEGER PRIMARY KEY ASC,
+        schedule_id INT,
+        start TEXT NOT NULL,
+        first TEXT,
+        last TEXT,
+        last_pruned TEXT,
+        created TEXT,
+        repeat BIGINT NOT NULL,
+        schedule TEXT NOT NULL,
+        created_count INT DEFAULT 0,
+        pruned_count INT DEFAULT 0,
+        retention TEXT,
+        FOREIGN KEY(schedule_id) REFERENCES schedules(id) ON DELETE CASCADE,
+        UNIQUE (start, repeat)
+    );'''
+
+    EXEC_QUERY = '''SELECT
+        sm.retention,
+        sm.repeat - (strftime("%s", "now") - strftime("%s", sm.start)) %
+        sm.repeat "until",
+        sm.start, sm.repeat
+        FROM schedules s
+            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
+        WHERE
+            s.path = ? AND
+            strftime("%s", "now") - strftime("%s", sm.start) > 0
+        ORDER BY until;'''
+
+    GET_SCHEDULES = '''SELECT
+        s.path, s.subvol, s.rel_path, s.active,
+        sm.schedule, sm.retention, sm.start, sm.first, sm.last,
+        sm.last_pruned, sm.created, sm.created_count, sm.pruned_count
+        FROM schedules s
+            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
+        WHERE s.path = ?'''
+
+    GET_SCHEDULE = '''SELECT
+        s.path, s.subvol, s.rel_path, s.active,
+        sm.schedule, sm.retention, sm.start, sm.first, sm.last,
+        sm.last_pruned, sm.created, sm.created_count, sm.pruned_count
+        FROM schedules s
+            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
+        WHERE s.path = ? and sm.start = ? AND sm.repeat = ?'''
 
     @classmethod
-    def from_db_row(cls, table_row, fs):
-        return cls(table_row[0],
-                   table_row[1],
-                   table_row[2],
-                   fs,
-                   table_row[3],
-                   table_row[4],
-                   None)
+    def get_db_schedules(cls, path, db, fs):
+        with db:
+            c = db.execute(cls.GET_SCHEDULES, (path,))
+        return [cls._from_get_query(row, fs) for row in c.fetchall()]
 
-    def repeat_in_s(self):
+    @classmethod
+    def get_db_schedule(cls, db, fs, path, start, repeat):
+        with db:
+            c = db.execute(cls.GET_SCHEDULE, (path, start, repeat))
+        r = c.fetchone()
+        if r:
+            return cls._from_get_query(r, fs)
+        else:
+            return None
+
+    LIST_SCHEDULES = '''SELECT
+        s.path, sm.schedule, sm.retention
+        FROM schedules s
+            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
+        WHERE'''
+
+    @classmethod
+    def list_schedules(cls, path, db, fs, recursive):
+        with db:
+            if recursive:
+                c = db.execute(cls.LIST_SCHEDULES + ' path LIKE ?',
+                               (f'{path}%',))
+            else:
+                c = db.execute(cls.LIST_SCHEDULES + ' path = ?',
+                               (f'{path}',))
+        return [row for row in c.fetchall()]
+
+    INSERT_SCHEDULE = '''INSERT INTO
+        schedules(path, subvol, rel_path, active)
+        Values(?, ?, ?, ?);'''
+    INSERT_SCHEDULE_META = '''INSERT INTO
+        schedules_meta(schedule_id, start, created, repeat, schedule, retention)
+        SELECT ?, ?, ?, ?, ?, ?'''
+
+    def store_schedule(self, db):
+        sched_id = None
+        with db:
+            try:
+                c = db.execute(self.INSERT_SCHEDULE,
+                               (self.path,
+                                self.subvol,
+                                self.rel_path,
+                                1))
+                sched_id = c.lastrowid
+            except sqlite3.IntegrityError:
+                # might be adding another schedule, retrieve sched id
+                log.debug(f'found schedule entry for {self.path}, trying to add meta')
+                c = db.execute('SELECT id FROM schedules where path = ?',
+                               (self.path,))
+                sched_id = c.fetchone()[0]
+                pass
+            db.execute(self.INSERT_SCHEDULE_META,
+                       (sched_id,
+                        self.start.isoformat(),
+                        self.created.isoformat(),
+                        self.repeat,
+                        self.schedule,
+                        self.retention))
+
+    @classmethod
+    def rm_schedule(cls, db, path, repeat, start):
+        with db:
+            cur = db.execute('SELECT id FROM schedules WHERE path = ?',
+                             (path,))
+            row = cur.fetchone()
+
+            if row is None:
+                log.info(f'no schedule for {path} found')
+                raise ValueError('SnapSchedule for {} not found'.format(path))
+
+            id_ = tuple(row)
+
+            if repeat or start:
+                meta_delete = 'DELETE FROM schedules_meta WHERE schedule_id = ?'
+                delete_param = id_
+                if repeat:
+                    meta_delete += ' AND schedule = ?'
+                    delete_param += (repeat,)
+                if start:
+                    meta_delete += ' AND start = ?'
+                    delete_param += (start,)
+                # maybe only delete meta entry
+                log.debug(f'executing {meta_delete}, {delete_param}')
+                res = db.execute(meta_delete + ';', delete_param).rowcount
+                if res < 1:
+                    raise ValueError(f'No schedule found for {repeat} {start}')
+                db.execute('COMMIT;')
+                # now check if we have schedules in meta left, if not delete
+                # the schedule as well
+                meta_count = db.execute(
+                    'SELECT COUNT() FROM schedules_meta WHERE schedule_id = ?',
+                    id_)
+                if meta_count.fetchone() == (0,):
+                    log.debug(
+                        f'no more schedules left, cleaning up schedules table')
+                    db.execute('DELETE FROM schedules WHERE id = ?;', id_)
+            else:
+                # just delete the schedule; ON DELETE CASCADE takes care of
+                # the rest
+                db.execute('DELETE FROM schedules WHERE id = ?;', id_)
+
+    def report(self):
+        import pprint
+        return pprint.pformat(self.__dict__)
+
+    @property
+    def repeat(self):
         mult = self.schedule[-1]
         period = int(self.schedule[0:-1])
         if mult == 'm':
@@ -98,6 +302,49 @@ class Schedule(object):
         else:
             raise Exception('schedule multiplier not recognized')
 
+    UPDATE_LAST = '''UPDATE schedules_meta
+    SET
+      last = ?,
+      created_count = created_count + 1,
+      first = CASE WHEN first IS NULL THEN ? ELSE first END
+    WHERE
+      start = ? AND
+      repeat = ?;'''
+
+    def update_last(self, time, db):
+        with db:
+            db.execute(self.UPDATE_LAST, (time.isoformat(), time.isoformat(),
+                                          self.start.isoformat(), self.repeat))
+        self.created_count += 1
+        self.last = time
+
+    UPDATE_INACTIVE = '''UPDATE schedules
+    SET
+      active = 0
+    WHERE
+      path = ?;'''
+
+    def set_inactive(self, db):
+        with db:
+            log.debug(f'Deactivating schedule on non-existing path {self.path}')
+            db.execute(self.UPDATE_INACTIVE, (self.path,))
+
+    UPDATE_PRUNED = '''UPDATE schedules_meta
+    SET
+      last_pruned = ?,
+      pruned_count = pruned_count + ?
+    WHERE
+      start = ? AND
+      repeat = ?;'''
+
+    def update_pruned(self, time, db, pruned):
+        with db:
+            db.execute(self.UPDATE_PRUNED, (time.isoformat(), pruned,
+                                            self.start.isoformat(),
+                                            self.repeat))
+        self.pruned_count += pruned
+        self.last_pruned = time
+
 
 def parse_retention(retention):
     ret = {}
@@ -112,68 +359,19 @@ def parse_retention(retention):
 
 class SnapSchedClient(CephfsClient):
 
-    CREATE_TABLES = '''CREATE TABLE schedules(
-        id integer PRIMARY KEY ASC,
-        path text NOT NULL UNIQUE,
-        subvol text,
-        rel_path text NOT NULL,
-        active int NOT NULL
-    );
-    CREATE TABLE schedules_meta(
-        id integer PRIMARY KEY ASC,
-        schedule_id int,
-        start bigint NOT NULL,
-        repeat bigint NOT NULL,
-        schedule text NOT NULL,
-        retention text,
-        FOREIGN KEY(schedule_id) REFERENCES schedules(id) ON DELETE CASCADE,
-        UNIQUE (start, repeat)
-    );'''
-
     def __init__(self, mgr):
         super(SnapSchedClient, self).__init__(mgr)
         # TODO maybe iterate over all fs instance in fsmap and load snap dbs?
         self.sqlite_connections = {}
         self.active_timers = {}
 
-    EXEC_QUERY = '''SELECT
-        s.path, sm.retention,
-        sm.repeat - (strftime("%s", "now") - sm.start) % sm.repeat "until"
-        FROM schedules s
-            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
-        ORDER BY until;'''
-
-    def refresh_snap_timers(self, fs):
-        try:
-            log.debug(f'SnapDB on {fs} changed, updating next Timer')
-            db = self.get_schedule_db(fs)
-            rows = []
-            with db:
-                cur = db.execute(self.EXEC_QUERY)
-                rows = cur.fetchmany(1)
-                log.debug(f'retrieved {cur.rowcount} rows')
-            timers = self.active_timers.get(fs, [])
-            for timer in timers:
-                timer.cancel()
-            timers = []
-            for row in rows:
-                log.debug(f'adding timer for {row}')
-                log.debug(f'Creating new snapshot timer')
-                t = Timer(row[2],
-                          self.create_scheduled_snapshot,
-                          args=[fs, row[0], row[1]])
-                t.start()
-                timers.append(t)
-                log.debug(f'Will snapshot {row[0]} in fs {fs} in {row[2]}s')
-            self.active_timers[fs] = timers
-        except Exception as e:
-            log.error(f'refresh raised {e}')
-
     def get_schedule_db(self, fs):
         if fs not in self.sqlite_connections:
-            self.sqlite_connections[fs] = sqlite3.connect(':memory:',
-                                                          check_same_thread=False)
+            self.sqlite_connections[fs] = sqlite3.connect(
+                ':memory:',
+                check_same_thread=False)
             with self.sqlite_connections[fs] as con:
+                con.row_factory = sqlite3.Row
                 con.execute("PRAGMA FOREIGN_KEYS = 1")
                 pool = self.get_metadata_pool(fs)
                 with open_ioctx(self, pool) as ioctx:
@@ -184,7 +382,7 @@ class SnapSchedClient(CephfsClient):
                         con.executescript(db)
                     except rados.ObjectNotFound:
                         log.info(f'No schedule DB found in {fs}')
-                        con.executescript(self.CREATE_TABLES)
+                        con.executescript(Schedule.CREATE_TABLES)
         return self.sqlite_connections[fs]
 
     def store_schedule_db(self, fs):
@@ -203,31 +401,66 @@ class SnapSchedClient(CephfsClient):
             ioctx.write_full(SNAP_DB_OBJECT_NAME,
                              '\n'.join(db_content).encode('utf-8'))
 
-    def create_scheduled_snapshot(self, fs_name, path, retention):
+    def refresh_snap_timers(self, fs, path):
+        try:
+            log.debug(f'SnapDB on {fs} changed for {path}, updating next Timer')
+            db = self.get_schedule_db(fs)
+            rows = []
+            with db:
+                cur = db.execute(Schedule.EXEC_QUERY, (path,))
+                rows = cur.fetchmany(1)
+            timers = self.active_timers.get((fs, path), [])
+            for timer in timers:
+                timer.cancel()
+            timers = []
+            for row in rows:
+                log.debug(f'adding timer for {row}')
+                log.debug(f'Creating new snapshot timer')
+                t = Timer(row[1],
+                          self.create_scheduled_snapshot,
+                          args=[fs, path, row[0], row[2], row[3]])
+                t.start()
+                timers.append(t)
+                log.debug(f'Will snapshot {row[0]} in fs {fs} in {row[1]}s')
+            self.active_timers[(fs, path)] = timers
+        except Exception as e:
+            log.error(f'refresh raised {e}')
+
+    def create_scheduled_snapshot(self, fs_name, path, retention, start, repeat):
         log.debug(f'Scheduled snapshot of {path} triggered')
         try:
-            time = datetime.utcnow().strftime(TS_FORMAT)
+            db = self.get_schedule_db(fs_name)
+            sched = Schedule.get_db_schedule(db, fs_name, path, start, repeat)
+            time = datetime.now(timezone.utc)
             with open_filesystem(self, fs_name) as fs_handle:
-                fs_handle.mkdir(f'{path}/.snap/scheduled-{time}', 0o755)
+                fs_handle.mkdir(f'{path}/.snap/scheduled-{time.strftime(SNAPSHOT_TS_FORMAT)}', 0o755)
             log.info(f'created scheduled snapshot of {path}')
+            # TODO change last snap timestamp in db, maybe first
+            sched.update_last(time, db)
         except cephfs.Error as e:
             log.info(f'scheduled snapshot creating of {path} failed: {e}')
+            # TODO set inactive if path doesn't exist
+            sched.set_inactive(db)
         except Exception as e:
             # catch all exceptions cause otherwise we'll never know since this
             # is running in a thread
             log.error(f'ERROR create_scheduled_snapshot raised{e}')
         finally:
-            log.info(f'finally branch')
-            self.refresh_snap_timers(fs_name)
-            log.info(f'calling prune')
-            self.prune_snapshots(fs_name, path, retention)
-
-    def prune_snapshots(self, fs_name, path, retention):
-        log.debug('Pruning snapshots')
-        ret = parse_retention(retention)
+            self.refresh_snap_timers(fs_name, path)
+            self.prune_snapshots(sched)
+
+    def prune_snapshots(self, sched):
         try:
+            log.debug('Pruning snapshots')
+            ret = parse_retention(sched.retention)
+            path = sched.path
+            if not ret:
+                # TODO prune if too many (300?)
+                log.debug(f'schedule on {path} has no retention specified')
+                return
             prune_candidates = set()
-            with open_filesystem(self, fs_name) as fs_handle:
+            time = datetime.now(timezone.utc)
+            with open_filesystem(self, sched.fs) as fs_handle:
                 with fs_handle.opendir(f'{path}/.snap') as d_handle:
                     dir_ = fs_handle.readdir(d_handle)
                     while dir_:
@@ -235,20 +468,21 @@ class SnapSchedClient(CephfsClient):
                             log.debug(f'add {dir_.d_name} to pruning')
                             ts = datetime.strptime(
                                 dir_.d_name.lstrip(b'scheduled-').decode('utf-8'),
-                                TS_FORMAT)
+                                SNAPSHOT_TS_FORMAT)
                             prune_candidates.add((dir_, ts))
                         else:
                             log.debug(f'skipping dir entry {dir_.d_name}')
                         dir_ = fs_handle.readdir(d_handle)
-                to_keep = self.get_prune_set(prune_candidates, ret)
-                for k in prune_candidates - to_keep:
+                to_prune = self.get_prune_set(prune_candidates, ret)
+                for k in to_prune:
                     dirname = k[0].d_name.decode('utf-8')
                     log.debug(f'rmdir on {dirname}')
                     fs_handle.rmdir(f'{path}/.snap/{dirname}')
-                log.debug(f'keeping {to_keep}')
+                if to_prune:
+                    sched.update_pruned(time, self.get_schedule_db(sched.fs),
+                                        len(to_prune))
         except Exception as e:
             log.debug(f'prune_snapshots threw {e}')
-        # TODO: handle snap pruning accoring to retention
 
     def get_prune_set(self, candidates, retention):
         PRUNING_PATTERNS = OrderedDict([
@@ -282,98 +516,24 @@ class SnapSchedClient(CephfsClient):
                             break
             # TODO maybe do candidates - keep here? we want snaps counting it
             # hours not be considered for days and it cuts down on iterations
-        return keep
-
-    def validate_schedule(self, fs_name, sched):
-        try:
-            with open_filesystem(self, fs_name) as fs_handle:
-                fs_handle.stat(sched.path)
-        except cephfs.ObjectNotFound:
-            log.error('Path {} not found'.format(sched.path))
-            return False
-        return True
-
-    LIST_SCHEDULES = '''SELECT
-        s.path, sm.schedule, sm.retention, sm.start, s.subvol, s.rel_path
-        FROM schedules s
-            INNER JOIN schedules_meta sm ON sm.schedule_id = s.id
-        WHERE s.path = ?'''
+        return candidates - keep
 
-    def list_snap_schedules(self, fs, path):
+    def get_snap_schedules(self, fs, path):
         db = self.get_schedule_db(fs)
-        c = db.execute(self.LIST_SCHEDULES, (path,))
-        return [Schedule.from_db_row(row, fs) for row in c.fetchall()]
+        return Schedule.get_db_schedules(path, db, fs)
 
-    def dump_snap_schedule(self, fs, path):
+    def list_snap_schedules(self, fs, path, recursive):
         db = self.get_schedule_db(fs)
-        # TODO retrieve multiple schedules per path from schedule_meta
-        # with db:
-        c = db.execute('SELECT * FROM SCHEDULES WHERE path LIKE ?',
-                       (f'{path}%',))
-        return [row for row in c.fetchall()]
-
-# TODO currently not used, probably broken
-    UPDATE_SCHEDULE = '''UPDATE schedules
-        SET
-            schedule = ?,
-            active = 1
-        WHERE path = ?;'''
-    UPDATE_SCHEDULE_META = '''UPDATE schedules_meta
-        SET
-            start = ?,
-            repeat = ?,
-            schedule = ?
-            retention = ?
-        WHERE schedule_id = (
-            SELECT id FROM SCHEDULES WHERE path = ?);'''
-
-    @updates_schedule_db
-    def update_snap_schedule(self, fs, sched):
-        db = self.get_schedule_db(fs)
-        with db:
-            db.execute(self.UPDATE_SCHEDULE,
-                       (sched.schedule,
-                        sched.path))
-            db.execute(self.UPDATE_SCHEDULE_META,
-                       (f'strftime("%s", "{sched.first_run}")',
-                        sched.repeat_in_s(),
-                        sched.retention,
-                        sched.path))
-
-    INSERT_SCHEDULE = '''INSERT INTO
-        schedules(path, subvol, rel_path, active)
-        Values(?, ?, ?, ?);'''
-    INSERT_SCHEDULE_META = '''INSERT INTO
-        schedules_meta(schedule_id, start, repeat, schedule, retention)
-        Values(last_insert_rowid(), ?, ?, ?, ?)'''
+        return Schedule.list_schedules(path, db, fs, recursive)
 
     @updates_schedule_db
     def store_snap_schedule(self, fs, sched):
+        log.debug(f'attempting to add schedule {sched}')
         db = self.get_schedule_db(fs)
-        with db:
-            try:
-                db.execute(self.INSERT_SCHEDULE,
-                           (sched.path,
-                            sched.subvol,
-                            sched.rel_path,
-                            1))
-            except sqlite3.IntegrityError:
-                # might be adding another schedule
-                pass
-            db.execute(self.INSERT_SCHEDULE_META,
-                       (f'strftime("%s", "{sched.first_run}")',
-                        sched.repeat_in_s(),
-                        sched.schedule,
-                        sched.retention))
-            self.store_schedule_db(sched.fs)
+        sched.store_schedule(db)
+        self.store_schedule_db(sched.fs)
 
     @updates_schedule_db
-    def rm_snap_schedule(self, fs, path):
+    def rm_snap_schedule(self, fs, path, repeat, start):
         db = self.get_schedule_db(fs)
-        with db:
-            cur = db.execute('SELECT id FROM SCHEDULES WHERE path = ?',
-                             (path,))
-            id_ = cur.fetchone()
-            # TODO check for repeat-start pair and delete that if present, all
-            # otherwise. If repeat-start was speced, delete only those
-            db.execute('DELETE FROM schedules WHERE id = ?;', id_)
+        Schedule.rm_schedule(db, path, repeat, start)
index 89c78c3ef9759362ca6158b180c0fe34942b4e7e..b7369cc5bc4bc9f13db0a5f6c8b4cd8b45aee929 100644 (file)
@@ -4,6 +4,7 @@ Copyright (C) 2019 SUSE
 LGPL2.1.  See file COPYING.
 """
 import errno
+import json
 import sqlite3
 from .fs.schedule import SnapSchedClient, Schedule
 from mgr_module import MgrModule, CLIReadCommand, CLIWriteCommand
@@ -26,7 +27,7 @@ class Module(MgrModule):
         rc, subvol_path, err = self.remote('fs', 'subvolume', 'getpath',
                                            fs, subvol)
         if rc != 0:
-            # TODO custom exception
+            # TODO custom exception?
             raise Exception(f'Could not resolve {path} in {fs}, {subvol}')
         return subvol_path + path
 
@@ -47,35 +48,37 @@ class Module(MgrModule):
         self._initialized.wait()
         return -errno.EINVAL, "", "Unknown command"
 
-    @CLIReadCommand('fs snap-schedule dump',
+    @CLIReadCommand('fs snap-schedule status',
                     'name=path,type=CephString,req=false '
                     'name=subvol,type=CephString,req=false '
                     'name=fs,type=CephString,req=false',
                     'List current snapshot schedules')
-    def snap_schedule_dump(self, path='/', subvol=None, fs=None):
+    def snap_schedule_get(self, path='/', subvol=None, fs=None):
         use_fs = fs if fs else self.default_fs
         try:
-            ret_scheds = self.client.dump_snap_schedule(use_fs, path)
+            ret_scheds = self.client.get_snap_schedules(use_fs, path)
         except CephfsConnectionException as e:
             return e.to_tuple()
-        return 0, ' '.join(str(ret_scheds)), ''
+        return 0, '\n===\n'.join([ret_sched.report() for ret_sched in ret_scheds]), ''
 
     @CLIReadCommand('fs snap-schedule list',
                     'name=path,type=CephString '
+                    'name=recursive,type=CephString,req=false '
                     'name=subvol,type=CephString,req=false '
                     'name=fs,type=CephString,req=false',
                     'Get current snapshot schedule for <path>')
-    def snap_schedule_list(self, path, subvol=None, fs=None):
+    def snap_schedule_list(self, path, subvol=None, recursive=False, fs=None):
         try:
             use_fs = fs if fs else self.default_fs
-            scheds = self.client.list_snap_schedules(use_fs, path)
+            scheds = self.client.list_snap_schedules(use_fs, path, recursive)
+            self.log.debug(f'recursive is {recursive}')
         except CephfsConnectionException as e:
             return e.to_tuple()
         if not scheds:
-            return -1, '', f'SnapSchedule for {path} not found'
-        return 0, str([str(sched) for sched in scheds]), ''
+            return errno.ENOENT, '', f'SnapSchedule for {path} not found'
+        return 0, json.dumps([[sched[1], sched[2]] for sched in scheds]), ''
 
-    @CLIWriteCommand('fs snap-schedule set',
+    @CLIWriteCommand('fs snap-schedule add',
                      'name=path,type=CephString '
                      'name=snap-schedule,type=CephString '
                      'name=retention-policy,type=CephString,req=false '
@@ -83,11 +86,11 @@ class Module(MgrModule):
                      'name=fs,type=CephString,req=false '
                      'name=subvol,type=CephString,req=false',
                      'Set a snapshot schedule for <path>')
-    def snap_schedule_set(self,
+    def snap_schedule_add(self,
                           path,
                           snap_schedule,
                           retention_policy='',
-                          start='00:00',
+                          start=None,
                           fs=None,
                           subvol=None):
         try:
@@ -95,30 +98,37 @@ class Module(MgrModule):
             abs_path = self.resolve_subvolume_path(fs, subvol, path)
             sched = Schedule(abs_path, snap_schedule, retention_policy,
                              start, use_fs, subvol, path)
-            # TODO allow schedules on non-existent paths?
-            # self.client.validate_schedule(fs, sched)
             self.client.store_snap_schedule(use_fs, sched)
             suc_msg = f'Schedule set for path {path}'
         except sqlite3.IntegrityError:
-            existing_sched = self.client.list_snap_schedule(use_fs, path)
-            error_msg = f'Found existing schedule {existing_sched}'
+            existing_scheds = self.client.get_snap_schedules(use_fs, path)
+            report = [s.report() for s in existing_scheds]
+            error_msg = f'Found existing schedule {report}'
             self.log.error(error_msg)
-            return 1, '', error_msg
+            return errno.EEXIST, '', error_msg
         except CephfsConnectionException as e:
             return e.to_tuple()
         return 0, suc_msg, ''
 
     @CLIWriteCommand('fs snap-schedule remove',
                      'name=path,type=CephString '
+                     'name=repeat,type=CephString,req=false '
+                     'name=start,type=CephString,req=false '
                      'name=subvol,type=CephString,req=false '
                      'name=fs,type=CephString,req=false',
                      'Remove a snapshot schedule for <path>')
-    def snap_schedule_rm(self, path, subvol=None, fs=None):
+    def snap_schedule_rm(self,
+                         path,
+                         repeat=None,
+                         start=None,
+                         subvol=None,
+                         fs=None):
         try:
             use_fs = fs if fs else self.default_fs
-            self.client.rm_snap_schedule(use_fs, path)
+            abs_path = self.resolve_subvolume_path(fs, subvol, path)
+            self.client.rm_snap_schedule(use_fs, abs_path, repeat, start)
         except CephfsConnectionException as e:
             return e.to_tuple()
-        except ObjectNotFound as e:
-            return e.errno, '', 'SnapSchedule for {} not found'.format(path)
+        except ValueError as e:
+            return errno.ENOENT, '', str(e)
         return 0, 'Schedule removed for path {}'.format(path), ''