- which Ceph release the monitors are running
- whether msgr v1 or v2 addresses are used for the monitors
- whether IPv4 or IPv6 addresses are used for the monitors
+ - whether RADOS cache tiering is enabled (and which mode)
+ - whether pools are replicated or erasure coded, and
+ which erasure code profile plugin and parameters are in use
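+
+ For example, a pool's entry in the report now looks something like this
+ (a hypothetical fragment; values will vary by cluster)::
+
+   {
+     "pool": 1,
+     "type": "replicated",
+     "pg_autoscale_mode": "warn",
+     "target_max_bytes": 0,
+     "target_max_objects": 0,
+     "erasure_code_profile": {},
+     "cache_mode": "none"
+   }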
If you had telemetry enabled, you will need to re-opt-in with::
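
  ceph telemetry on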
# - added device health metrics (i.e., SMART data, minus serial number)
# - added CephFS metadata (how many MDSs, fs features, how many data pools)
# - removed crush_rule
+# - added more pool metadata (rep vs ec, cache tiering mode, ec profile)
class Module(MgrModule):
config = dict()
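# cluster creation time, as recorded in the monmap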
report['created'] = mon_map['created']
+ # mons
v1_mons = 0
v2_mons = 0
ipv4_mons = 0
ipv6_mons = 0
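# Sketch of the elided counting loop (assumes each mon's 'public_addrs'
# holds an 'addrvec' whose entries carry a 'type' of 'v1'/'v2' and a
# bracketed 'addr' for IPv6):
for mon in mon_map['mons']:
    for a in mon['public_addrs']['addrvec']:
        if a['type'] == 'v2':
            v2_mons += 1
        elif a['type'] == 'v1':
            v1_mons += 1
        if a['addr'].startswith('['):
            ipv6_mons += 1
        else:
            ipv4_mons += 1
report['mon'] = {
    'count': len(mon_map['mons']),
    'ipv4_addr_mons': ipv4_mons,
    'ipv6_addr_mons': ipv6_mons,
    'v1_addr_mons': v1_mons,
    'v2_addr_mons': v2_mons,
}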
report['config'] = self.gather_configs()
+ # pools
num_pg = 0
report['pools'] = list()
for pool in osd_map['pools']:
num_pg += pool['pg_num']
+ ec_profile = {}
+ if pool['erasure_code_profile']:
+ orig = osd_map['erasure_code_profiles'].get(
+ pool['erasure_code_profile'], {})
+ ec_profile = {
+ k: orig[k] for k in orig.keys()
+ if k in ['k', 'm', 'plugin', 'technique',
+ 'crush-failure-domain', 'l']
+ }
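+ # for the default profile this ends up as something like
+ # {'k': '2', 'm': '1', 'plugin': 'jerasure', 'technique': 'reed_sol_van'}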
report['pools'].append(
{
'pool': pool['pool'],
'pg_autoscale_mode': pool['pg_autoscale_mode'],
'target_max_bytes': pool['target_max_bytes'],
'target_max_objects': pool['target_max_objects'],
+ 'type': ['', 'replicated', '', 'erasure'][pool['type']],
+ 'erasure_code_profile': ec_profile,
+ 'cache_mode': pool['cache_mode'],
}
)
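+ # the osdmap encodes pool 'type' numerically (1 = replicated, 3 = erasure),
+ # hence the lookup table above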
+ # osds
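# both 'require' fields are Ceph release names taken straight from the
# osdmap (e.g. 'nautilus' and 'luminous' respectively)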
report['osd'] = {
'count': len(osd_map['osds']),
'require_osd_release': osd_map['require_osd_release'],
'require_min_compat_client': osd_map['require_min_compat_client']
}
+ # cephfs
report['fs'] = {
    'count': len(fs_map['filesystems']),
    'feature_flags': fs_map['feature_flags'],
}
num_mds = 0
for fsm in fs_map['filesystems']:
    fs = fsm['mdsmap']
    num_mds += len(fs['info'])
report['fs']['total_num_mds'] = num_mds
+ # daemons
report['metadata'] = dict()
report['metadata']['osd'] = self.gather_osd_metadata(osd_map)
report['metadata']['mon'] = self.gather_mon_metadata(mon_map)
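
# The gather_*_metadata helpers roll daemon metadata up into anonymous
# per-value counts, so the report never carries per-daemon identifiers.
# A minimal sketch of the shape, not the exact upstream code (assumes
# MgrModule.get_metadata(svc_type, svc_id) returns that daemon's metadata
# dict; the key list here is illustrative):
def gather_osd_metadata(self, osd_map):
    from collections import defaultdict
    keys = ['osd_objectstore', 'rotational', 'arch', 'ceph_version']
    metadata = {k: defaultdict(int) for k in keys}
    for osd in osd_map['osds']:
        osd_meta = self.get_metadata('osd', str(osd['osd'])) or {}
        for k, v in osd_meta.items():
            if k in keys:
                metadata[k][v] += 1  # e.g. metadata['rotational']['1'] += 1
    return metadata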