(result,) = self._ctx.cluster.only(first_mon).remotes.keys()
return result
- def __init__(self, ctx) -> None:
+ def __init__(self, ctx, cluster_name='ceph') -> None:
self._ctx = ctx
- self.mon_manager = CephManager(self.admin_remote, ctx=ctx,
- logger=log.getChild('ceph_manager'))
+ try:
+ manager = ctx.managers[cluster_name]
+ except (AttributeError, KeyError) as e:
+ log.warning(f"Couldn't get a manager for cluster {cluster_name} from the context; exception: {e}")
+ manager = CephManager(self.admin_remote, ctx=ctx,
+ logger=log.getChild('ceph_manager'))
+ self.mon_manager = manager
def get_config(self, key, service_type=None):
"""
as a separate instance outside of your (multiple) Filesystem instances.
"""
- def __init__(self, ctx):
- super(MDSClusterBase, self).__init__(ctx)
+ def __init__(self, ctx, cluster_name='ceph'):
+ super(MDSClusterBase, self).__init__(ctx, cluster_name=cluster_name)
@property
def mds_ids(self):
This object is for driving a CephFS filesystem. The MDS daemons driven by
MDSCluster may be shared with other Filesystems.
"""
- def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
+ def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False, cluster_name='ceph',
**kwargs):
"""
kwargs accepts recover: bool, allow_dangerous_metadata_overlay: bool,
yes_i_really_really_mean_it: bool and fs_ops: list[str]
"""
- super(FilesystemBase, self).__init__(ctx)
+ super(FilesystemBase, self).__init__(ctx, cluster_name=cluster_name)
self.name = name
self.id = None
# XXX: this class has nothing to do with the Ceph daemon (ceph-mgr) of
# the same name.
class LocalCephManager(CephManager):
- def __init__(self, ctx=None):
+ def __init__(self, ctx=None, cluster_name=None):
self.ctx = ctx
- if self.ctx:
- self.cluster = self.ctx.config['cluster']
+ if cluster_name is None and self.ctx:
+ cluster_name = self.ctx.config['cluster']
+ self.cluster = cluster_name
# Deliberately skip parent init, only inheriting from it to get
# util methods like osd_dump that sit on top of raw_cluster_cmd
class LocalCephCluster(tasks.cephfs.filesystem.CephClusterBase):
- def __init__(self, ctx):
+ def __init__(self, ctx, cluster_name='ceph'):
# Deliberately skip calling CephCluster constructor
self._ctx = ctx
- self.mon_manager = LocalCephManager(ctx=self._ctx)
+ self.mon_manager = LocalCephManager(ctx=self._ctx, cluster_name=cluster_name)
self._conf = defaultdict(dict)
@property
tasks.cephfs.filesystem.CephCluster = LocalCephCluster
class LocalMDSCluster(LocalCephCluster, tasks.cephfs.filesystem.MDSClusterBase):
- def __init__(self, ctx):
- LocalCephCluster.__init__(self, ctx)
+ def __init__(self, ctx, cluster_name='ceph'):
+ LocalCephCluster.__init__(self, ctx, cluster_name=cluster_name)
# Deliberately skip calling MDSCluster constructor
self._mds_ids = ctx.daemons.daemons['ceph.mds'].keys()
log.debug("Discovered MDS IDs: {0}".format(self._mds_ids))
tasks.mgr.mgr_test_case.MgrCluster = LocalMgrCluster
class LocalFilesystem(LocalMDSCluster, tasks.cephfs.filesystem.FilesystemBase):
- def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False,
+ def __init__(self, ctx, fs_config={}, fscid=None, name=None, create=False, cluster_name='ceph',
**kwargs):
# Deliberately skip calling Filesystem constructor
- LocalMDSCluster.__init__(self, ctx)
+ LocalMDSCluster.__init__(self, ctx, cluster_name=cluster_name)
self.id = None
self.name = name
self.fs_config = fs_config
self.ec_profile = fs_config.get('ec_profile')
- self.mon_manager = LocalCephManager(ctx=self._ctx)
+ self.mon_manager = LocalCephManager(ctx=self._ctx, cluster_name=cluster_name)
self.client_remote = LocalRemote()
self.summary = get_summary("vstart_runner", None)
if not hasattr(self, 'managers'):
self.managers = {}
- self.managers[self.config['cluster']] = LocalCephManager(ctx=self)
+ self.managers[cluster_name] = LocalCephManager(ctx=self, cluster_name=cluster_name)
# Shove some LocalDaemons into the ctx.daemons DaemonGroup instance so that any
# tests that want to look these up via ctx can do so.