From: hsiang41
Date: Wed, 7 Nov 2018 14:05:35 +0000 (+0800)
Subject: mgr: Separate diskprediction cloud plugin from the diskprediction plugin
X-Git-Tag: v14.1.0~907^2
X-Git-Url: http://git.apps.os.sepia.ceph.com/?a=commitdiff_plain;h=9799eb67eb86c50ea0c200de0e322076649a4bdb;p=ceph-ci.git

mgr: Separate diskprediction cloud plugin from the diskprediction plugin

Separate the diskprediction plugin into diskprediction_local and
diskprediction_cloud plugins. Devicehealth invokes the device failure
prediction function according to the global configuration option
"device_failure_prediction_mode".

Signed-off-by: Rick Chen
---

diff --git a/doc/mgr/diskprediction.rst b/doc/mgr/diskprediction.rst
index bbc163f5c2e..eefe6ad8527 100644
--- a/doc/mgr/diskprediction.rst
+++ b/doc/mgr/diskprediction.rst
@@ -12,7 +12,8 @@ Enabling
 Run the following command to enable the *diskprediction* module in the Ceph
 environment::
 
-    ceph mgr module enable diskprediction
+    ceph mgr module enable diskprediction_cloud
+    ceph mgr module enable diskprediction_local
 
 Select the prediction mode::
 
diff --git a/qa/tasks/mgr/test_module_selftest.py b/qa/tasks/mgr/test_module_selftest.py
index 780f4751239..26673971b64 100644
--- a/qa/tasks/mgr/test_module_selftest.py
+++ b/qa/tasks/mgr/test_module_selftest.py
@@ -47,8 +47,11 @@ class TestModuleSelftest(MgrTestCase):
     def test_influx(self):
         self._selftest_plugin("influx")
 
-    def test_diskprediction(self):
-        self._selftest_plugin("diskprediction")
+    def test_diskprediction_local(self):
+        self._selftest_plugin("diskprediction_local")
+
+    def test_diskprediction_cloud(self):
+        self._selftest_plugin("diskprediction_cloud")
 
     def test_telegraf(self):
         self._selftest_plugin("telegraf")
diff --git a/src/pybind/mgr/diskprediction/__init__.py b/src/pybind/mgr/diskprediction/__init__.py
deleted file mode 100644
index e65bbfbc867..00000000000
--- a/src/pybind/mgr/diskprediction/__init__.py
+++ /dev/null
@@ -1,2 +0,0 @@
-from __future__ import absolute_import
-from .module import Module
diff --git a/src/pybind/mgr/diskprediction/agent/__init__.py b/src/pybind/mgr/diskprediction/agent/__init__.py
deleted file mode 100644
index 64a456fa8e1..00000000000
--- a/src/pybind/mgr/diskprediction/agent/__init__.py
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import absolute_import
-
-from ..common import timeout, TimeoutError
-
-
-class BaseAgent(object):
-
-    measurement = ''
-
-    def __init__(self, mgr_module, obj_sender, timeout=30):
-        self.data = []
-        self._client = None
-        self._client = obj_sender
-        self._logger = mgr_module.log
-        self._module_inst = mgr_module
-        self._timeout = timeout
-
-    def run(self):
-        try:
-            self._collect_data()
-            self._run()
-        except TimeoutError:
-            self._logger.error('{} failed to execute {} task'.format(
-                __name__, self.measurement))
-
-    def __nonzero__(self):
-        if not self._module_inst and not self._client:
-            return False
-        else:
-            return True
-
-    @timeout()
-    def _run(self):
-        pass
-
-    @timeout()
-    def _collect_data(self):
-        pass
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction/agent/metrics/__init__.py
deleted file mode 100644
index 57fbfd5d2eb..00000000000
--- a/src/pybind/mgr/diskprediction/agent/metrics/__init__.py
+++ /dev/null
@@ -1,61 +0,0 @@
-from __future__ import absolute_import
-
-from ..
import BaseAgent -from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK - -AGENT_VERSION = '1.0.0' - - -class MetricsField(object): - def __init__(self): - self.tags = {} - self.fields = {} - self.timestamp = None - - def __str__(self): - return str({ - 'tags': self.tags, - 'fields': self.fields, - 'timestamp': self.timestamp - }) - - -class MetricsAgent(BaseAgent): - - def log_summary(self, status_info): - try: - if status_info: - measurement = status_info['measurement'] - success_count = status_info['success_count'] - failure_count = status_info['failure_count'] - total_count = success_count + failure_count - display_string = \ - '%s agent stats in total count: %s, success count: %s, failure count: %s.' - self._logger.info( - display_string % (measurement, total_count, success_count, failure_count) - ) - except Exception as e: - self._logger.error(str(e)) - - def _run(self): - collect_data = self.data - result = {} - if collect_data: - status_info = self._client.send_info(collect_data, self.measurement) - # show summary info - self.log_summary(status_info) - # write sub_agent buffer - total_count = status_info['success_count'] + status_info['failure_count'] - if total_count: - if status_info['success_count'] == 0: - self._module_inst.status = \ - {'status': DP_MGR_STAT_FAILED, - 'reason': 'failed to send metrics data to the server'} - elif status_info['failure_count'] == 0: - self._module_inst.status = \ - {'status': DP_MGR_STAT_OK} - else: - self._module_inst.status = \ - {'status': DP_MGR_STAT_WARNING, - 'reason': 'failed to send partial metrics data to the server'} - return result diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py deleted file mode 100644 index d49b063b247..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py +++ /dev/null @@ -1,146 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class CephCluster(MetricsField): - """ Ceph cluster structure """ - measurement = 'ceph_cluster' - - def __init__(self): - super(CephCluster, self).__init__() - self.tags['cluster_id'] = None - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.fields['cluster_health'] = '' - self.fields['num_mon'] = None - self.fields['num_mon_quorum'] = None - self.fields['num_osd'] = None - self.fields['num_osd_up'] = None - self.fields['num_osd_in'] = None - self.fields['osd_epoch'] = None - self.fields['osd_bytes'] = None - self.fields['osd_bytes_used'] = None - self.fields['osd_bytes_avail'] = None - self.fields['num_pool'] = None - self.fields['num_pg'] = None - self.fields['num_pg_active_clean'] = None - self.fields['num_pg_active'] = None - self.fields['num_pg_peering'] = None - self.fields['num_object'] = None - self.fields['num_object_degraded'] = None - self.fields['num_object_misplaced'] = None - self.fields['num_object_unfound'] = None - self.fields['num_bytes'] = None - self.fields['num_mds_up'] = None - self.fields['num_mds_in'] = None - self.fields['num_mds_failed'] = None - self.fields['mds_epoch'] = None - - -class CephClusterAgent(MetricsAgent): - measurement = 'ceph_cluster' - - def _collect_data(self): - # process data and save to 'self.data' - obj_api = ClusterAPI(self._module_inst) - cluster_id = obj_api.get_cluster_id() - - c_data = CephCluster() - cluster_state = obj_api.get_health_status() - c_data.tags['cluster_id'] = cluster_id - c_data.fields['cluster_health'] = str(cluster_state) - c_data.fields['agenthost'] = socket.gethostname() - c_data.tags['agenthost_domain_id'] = \ - '%s_%s' % (cluster_id, c_data.fields['agenthost']) - c_data.fields['osd_epoch'] = obj_api.get_osd_epoch() - c_data.fields['num_mon'] = len(obj_api.get_mons()) - c_data.fields['num_mon_quorum'] = \ - len(obj_api.get_mon_status().get('quorum', [])) - - osds = obj_api.get_osds() - num_osd_up = 0 - num_osd_in = 0 - for osd_data in osds: - if osd_data.get('up'): - num_osd_up = num_osd_up + 1 - if osd_data.get('in'): - num_osd_in = num_osd_in + 1 - if osds: - c_data.fields['num_osd'] = len(osds) - else: - c_data.fields['num_osd'] = 0 - c_data.fields['num_osd_up'] = num_osd_up - c_data.fields['num_osd_in'] = num_osd_in - c_data.fields['num_pool'] = len(obj_api.get_osd_pools()) - - df_stats = obj_api.get_df_stats() - total_bytes = df_stats.get('total_bytes', 0) - total_used_bytes = df_stats.get('total_used_bytes', 0) - total_avail_bytes = df_stats.get('total_avail_bytes', 0) - c_data.fields['osd_bytes'] = total_bytes - c_data.fields['osd_bytes_used'] = total_used_bytes - c_data.fields['osd_bytes_avail'] = total_avail_bytes - if total_bytes and total_avail_bytes: - c_data.fields['osd_bytes_used_percentage'] = \ - round(float(total_used_bytes) / float(total_bytes) * 100, 4) - else: - c_data.fields['osd_bytes_used_percentage'] = 0.0000 - - pg_stats = obj_api.get_pg_stats() - num_bytes = 0 - num_object = 0 - num_object_degraded = 0 - num_object_misplaced = 0 - num_object_unfound = 0 - num_pg_active = 0 - num_pg_active_clean = 0 - num_pg_peering = 0 - for pg_data in pg_stats: - num_pg_active = num_pg_active + len(pg_data.get('acting')) - if 'active+clean' in pg_data.get('state'): - num_pg_active_clean = num_pg_active_clean + 1 - if 'peering' in pg_data.get('state'): - num_pg_peering = num_pg_peering + 1 - - stat_sum = pg_data.get('stat_sum', {}) - num_object = num_object + stat_sum.get('num_objects', 0) - 
num_object_degraded = \ - num_object_degraded + stat_sum.get('num_objects_degraded', 0) - num_object_misplaced = \ - num_object_misplaced + stat_sum.get('num_objects_misplaced', 0) - num_object_unfound = \ - num_object_unfound + stat_sum.get('num_objects_unfound', 0) - num_bytes = num_bytes + stat_sum.get('num_bytes', 0) - - c_data.fields['num_pg'] = len(pg_stats) - c_data.fields['num_object'] = num_object - c_data.fields['num_object_degraded'] = num_object_degraded - c_data.fields['num_object_misplaced'] = num_object_misplaced - c_data.fields['num_object_unfound'] = num_object_unfound - c_data.fields['num_bytes'] = num_bytes - c_data.fields['num_pg_active'] = num_pg_active - c_data.fields['num_pg_active_clean'] = num_pg_active_clean - c_data.fields['num_pg_peering'] = num_pg_active_clean - - filesystems = obj_api.get_file_systems() - num_mds_in = 0 - num_mds_up = 0 - num_mds_failed = 0 - mds_epoch = 0 - for fs_data in filesystems: - num_mds_in = \ - num_mds_in + len(fs_data.get('mdsmap', {}).get('in', [])) - num_mds_up = \ - num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {})) - num_mds_failed = \ - num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', [])) - mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0) - c_data.fields['num_mds_in'] = num_mds_in - c_data.fields['num_mds_up'] = num_mds_up - c_data.fields['num_mds_failed'] = num_mds_failed - c_data.fields['mds_epoch'] = mds_epoch - self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py deleted file mode 100644 index 0c85fb514cb..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py +++ /dev/null @@ -1,159 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class CephMON(MetricsField): - """ Ceph monitor structure """ - measurement = 'ceph_mon' - - def __init__(self): - super(CephMON, self).__init__() - self.tags['cluster_id'] = None - self.tags['mon_id'] = None - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.fields['num_sessions'] = None - self.fields['session_add'] = None - self.fields['session_rm'] = None - self.fields['session_trim'] = None - self.fields['num_elections'] = None - self.fields['election_call'] = None - self.fields['election_win'] = None - self.fields['election_lose'] = None - - -class CephOSD(MetricsField): - """ Ceph osd structure """ - measurement = 'ceph_osd' - - def __init__(self): - super(CephOSD, self).__init__() - self.tags['cluster_id'] = None - self.tags['osd_id'] = None - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.tags['host_domain_id'] = None - self.fields['op_w'] = None - self.fields['op_in_bytes'] = None - self.fields['op_r'] = None - self.fields['op_out_bytes'] = None - self.fields['op_wip'] = None - self.fields['op_latency'] = None - self.fields['op_process_latency'] = None - self.fields['op_r_latency'] = None - self.fields['op_r_process_latency'] = None - self.fields['op_w_in_bytes'] = None - self.fields['op_w_latency'] = None - self.fields['op_w_process_latency'] = None - self.fields['op_w_prepare_latency'] = None - self.fields['op_rw'] = None - self.fields['op_rw_in_bytes'] = None - self.fields['op_rw_out_bytes'] = None - self.fields['op_rw_latency'] = None - self.fields['op_rw_process_latency'] = None - self.fields['op_rw_prepare_latency'] = None - self.fields['op_before_queue_op_lat'] = None - self.fields['op_before_dequeue_op_lat'] = None - - -class CephMonOsdAgent(MetricsAgent): - measurement = 'ceph_mon_osd' - - # counter types - PERFCOUNTER_LONGRUNAVG = 4 - PERFCOUNTER_COUNTER = 8 - PERFCOUNTER_HISTOGRAM = 0x10 - PERFCOUNTER_TYPE_MASK = ~3 - - def _stattype_to_str(self, stattype): - typeonly = stattype & self.PERFCOUNTER_TYPE_MASK - if typeonly == 0: - return 'gauge' - if typeonly == self.PERFCOUNTER_LONGRUNAVG: - # this lie matches the DaemonState decoding: only val, no counts - return 'counter' - if typeonly == self.PERFCOUNTER_COUNTER: - return 'counter' - if typeonly == self.PERFCOUNTER_HISTOGRAM: - return 'histogram' - return '' - - def _generate_osd(self, cluster_id, service_name, perf_counts): - obj_api = ClusterAPI(self._module_inst) - service_id = service_name[4:] - d_osd = CephOSD() - stat_bytes = 0 - stat_bytes_used = 0 - d_osd.tags['cluster_id'] = cluster_id - d_osd.tags['osd_id'] = service_name[4:] - d_osd.fields['agenthost'] = socket.gethostname() - d_osd.tags['agenthost_domain_id'] = \ - '%s_%s' % (cluster_id, d_osd.fields['agenthost']) - d_osd.tags['host_domain_id'] = \ - '%s_%s' % (cluster_id, - obj_api.get_osd_hostname(d_osd.tags['osd_id'])) - for i_key, i_val in perf_counts.iteritems(): - if i_key[:4] == 'osd.': - key_name = i_key[4:] - else: - key_name = i_key - if self._stattype_to_str(i_val['type']) == 'counter': - value = obj_api.get_rate('osd', service_id, i_key) - else: - value = obj_api.get_latest('osd', service_id, i_key) - if key_name == 'stat_bytes': - stat_bytes = value - elif key_name == 'stat_bytes_used': - stat_bytes_used = value - else: - d_osd.fields[key_name] = value - - if stat_bytes and stat_bytes_used: - d_osd.fields['stat_bytes_used_percentage'] = \ - round(float(stat_bytes_used) / float(stat_bytes) * 100, 
4) - else: - d_osd.fields['stat_bytes_used_percentage'] = 0.0000 - self.data.append(d_osd) - - def _generate_mon(self, cluster_id, service_name, perf_counts): - d_mon = CephMON() - d_mon.tags['cluster_id'] = cluster_id - d_mon.tags['mon_id'] = service_name[4:] - d_mon.fields['agenthost'] = socket.gethostname() - d_mon.tags['agenthost_domain_id'] = \ - '%s_%s' % (cluster_id, d_mon.fields['agenthost']) - d_mon.fields['num_sessions'] = \ - perf_counts.get('mon.num_sessions', {}).get('value', 0) - d_mon.fields['session_add'] = \ - perf_counts.get('mon.session_add', {}).get('value', 0) - d_mon.fields['session_rm'] = \ - perf_counts.get('mon.session_rm', {}).get('value', 0) - d_mon.fields['session_trim'] = \ - perf_counts.get('mon.session_trim', {}).get('value', 0) - d_mon.fields['num_elections'] = \ - perf_counts.get('mon.num_elections', {}).get('value', 0) - d_mon.fields['election_call'] = \ - perf_counts.get('mon.election_call', {}).get('value', 0) - d_mon.fields['election_win'] = \ - perf_counts.get('mon.election_win', {}).get('value', 0) - d_mon.fields['election_lose'] = \ - perf_counts.get('election_lose', {}).get('value', 0) - self.data.append(d_mon) - - def _collect_data(self): - # process data and save to 'self.data' - obj_api = ClusterAPI(self._module_inst) - perf_data = obj_api.get_all_perf_counters() - if not perf_data and not isinstance(perf_data, dict): - self._logger.error('unable to get all perf counters') - return - cluster_id = obj_api.get_cluster_id() - for n_name, i_perf in perf_data.iteritems(): - if n_name[0:3].lower() == 'mon': - self._generate_mon(cluster_id, n_name, i_perf) - elif n_name[0:3].lower() == 'osd': - self._generate_osd(cluster_id, n_name, i_perf) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py deleted file mode 100644 index 86ee10a21d9..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py +++ /dev/null @@ -1,58 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class CephPool(MetricsField): - """ Ceph pool structure """ - measurement = 'ceph_pool' - - def __init__(self): - super(CephPool, self).__init__() - self.tags['cluster_id'] = None - self.tags['pool_id'] = None - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.fields['bytes_used'] = None - self.fields['max_avail'] = None - self.fields['objects'] = None - self.fields['wr_bytes'] = None - self.fields['dirty'] = None - self.fields['rd_bytes'] = None - self.fields['raw_bytes_used'] = None - - -class CephPoolAgent(MetricsAgent): - measurement = 'ceph_pool' - - def _collect_data(self): - # process data and save to 'self.data' - obj_api = ClusterAPI(self._module_inst) - df_data = obj_api.get('df') - cluster_id = obj_api.get_cluster_id() - for pool in df_data.get('pools', []): - d_pool = CephPool() - p_id = pool.get('id') - d_pool.tags['cluster_id'] = cluster_id - d_pool.tags['pool_id'] = p_id - d_pool.fields['agenthost'] = socket.gethostname() - d_pool.tags['agenthost_domain_id'] = \ - '%s_%s' % (cluster_id, d_pool.fields['agenthost']) - d_pool.fields['bytes_used'] = \ - pool.get('stats', {}).get('bytes_used', 0) - d_pool.fields['max_avail'] = \ - pool.get('stats', {}).get('max_avail', 0) - d_pool.fields['objects'] = \ - pool.get('stats', {}).get('objects', 0) - d_pool.fields['wr_bytes'] = \ - pool.get('stats', {}).get('wr_bytes', 0) - d_pool.fields['dirty'] = \ - pool.get('stats', {}).get('dirty', 0) - d_pool.fields['rd_bytes'] = \ - pool.get('stats', {}).get('rd_bytes', 0) - d_pool.fields['raw_bytes_used'] = \ - pool.get('stats', {}).get('raw_bytes_used', 0) - self.data.append(d_pool) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction/agent/metrics/db_relay.py deleted file mode 100644 index 1d5ca239390..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/db_relay.py +++ /dev/null @@ -1,610 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import MetricsAgent, MetricsField -from ...common import get_human_readable -from ...common.clusterdata import ClusterAPI -from ...common.cypher import CypherOP, NodeInfo - - -class BaseDP(object): - """ basic diskprediction structure """ - _fields = [] - - def __init__(self, *args, **kwargs): - if len(args) > len(self._fields): - raise TypeError('Expected {} arguments'.format(len(self._fields))) - - for name, value in zip(self._fields, args): - setattr(self, name, value) - - for name in self._fields[len(args):]: - setattr(self, name, kwargs.pop(name)) - - if kwargs: - raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs))) - - -class MGRDpCeph(BaseDP): - _fields = [ - 'fsid', 'health', 'max_osd', 'size', - 'avail_size', 'raw_used', 'raw_used_percent' - ] - - -class MGRDpHost(BaseDP): - _fields = ['fsid', 'host', 'ipaddr'] - - -class MGRDpMon(BaseDP): - _fields = ['fsid', 'host', 'ipaddr'] - - -class MGRDpOsd(BaseDP): - _fields = [ - 'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr', - 'cluster_addr', 'state', 'backend_filestore_dev_node', - 'backend_filestore_partition_path', 'ceph_release', 'devices', - 'osd_data', 'osd_journal', 'rotational' - ] - - -class MGRDpMds(BaseDP): - _fields = ['fsid', 'host', 'ipaddr'] - - -class MGRDpPool(BaseDP): - _fields = [ - 'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size', - 'pg_num', 'pgp_num', 'created_time', 'used', 'pgids' - ] - - -class MGRDpRBD(BaseDP): - _fields = ['fsid', '_id', 'name', 'pool_name', 'size', 'pgids'] - - -class MGRDpPG(BaseDP): - _fields = [ - 'fsid', 'pgid', 'up_osds', 'acting_osds', 'state', - 'objects', 'degraded', 'misplaced', 'unfound' - ] - - -class MGRDpDisk(BaseDP): - _fields = ['host_domain_id', 'model', 'size'] - - -class DBRelay(MetricsField): - """ DB Relay structure """ - measurement = 'db_relay' - - def __init__(self): - super(DBRelay, self).__init__() - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.tags['dc_tag'] = 'na' - self.tags['host'] = None - self.fields['cmd'] = None - - -class DBRelayAgent(MetricsAgent): - measurement = 'db_relay' - - def __init__(self, *args, **kwargs): - super(DBRelayAgent, self).__init__(*args, **kwargs) - self._cluster_node = self._get_cluster_node() - self._cluster_id = self._cluster_node.domain_id - self._host_nodes = dict() - self._osd_nodes = dict() - - def _get_cluster_node(self): - db = ClusterAPI(self._module_inst) - cluster_id = db.get_cluster_id() - dp_cluster = MGRDpCeph( - fsid=cluster_id, - health=db.get_health_status(), - max_osd=db.get_max_osd(), - size=db.get_global_total_size(), - avail_size=db.get_global_avail_size(), - raw_used=db.get_global_raw_used_size(), - raw_used_percent=db.get_global_raw_used_percent() - ) - cluster_id = db.get_cluster_id() - cluster_name = cluster_id[-12:] - cluster_node = NodeInfo( - label='CephCluster', - domain_id=cluster_id, - name='cluster-{}'.format(cluster_name), - meta=dp_cluster.__dict__ - ) - return cluster_node - - def _cluster_contains_host(self): - cluster_id = self._cluster_id - cluster_node = self._cluster_node - - db = ClusterAPI(self._module_inst) - - hosts = set() - - # Add host from osd - osd_data = db.get_osds() - for _data in osd_data: - osd_id = _data['osd'] - if not _data.get('in'): - continue - osd_addr = _data['public_addr'].split(':')[0] - osd_metadata = db.get_osd_metadata(osd_id) - if osd_metadata: - osd_host = osd_metadata['hostname'] - hosts.add((osd_host, osd_addr)) - - # Add host from mon - mons = db.get_mons() - for _data in mons: - 
mon_host = _data['name'] - mon_addr = _data['public_addr'].split(':')[0] - if mon_host: - hosts.add((mon_host, mon_addr)) - - # Add host from mds - file_systems = db.get_file_systems() - for _data in file_systems: - mds_info = _data.get('mdsmap').get('info') - for _gid in mds_info: - mds_data = mds_info[_gid] - mds_addr = mds_data.get('addr').split(':')[0] - mds_host = mds_data.get('name') - if mds_host: - hosts.add((mds_host, mds_addr)) - - # create node relation - for tp in hosts: - data = DBRelay() - host = tp[0] - self._host_nodes[host] = None - - host_node = NodeInfo( - label='VMHost', - domain_id='{}_{}'.format(cluster_id, host), - name=host, - meta={} - ) - - # add osd node relationship - cypher_cmd = CypherOP.add_link( - cluster_node, - host_node, - 'CephClusterContainsHost' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self._host_nodes[host] = host_node - self.data.append(data) - - def _host_contains_mon(self): - cluster_id = self._cluster_id - - db = ClusterAPI(self._module_inst) - mons = db.get_mons() - for mon in mons: - mon_name = mon.get('name', '') - mon_addr = mon.get('addr', '').split(':')[0] - for hostname in self._host_nodes: - if hostname != mon_name: - continue - - host_node = self._host_nodes[hostname] - data = DBRelay() - dp_mon = MGRDpMon( - fsid=cluster_id, - host=mon_name, - ipaddr=mon_addr - ) - - # create mon node - mon_node = NodeInfo( - label='CephMon', - domain_id='{}.mon.{}'.format(cluster_id, mon_name), - name=mon_name, - meta=dp_mon.__dict__ - ) - - # add mon node relationship - cypher_cmd = CypherOP.add_link( - host_node, - mon_node, - 'HostContainsMon' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - - def _host_contains_osd(self): - cluster_id = self._cluster_id - - db = ClusterAPI(self._module_inst) - osd_data = db.get_osd_data() - osd_journal = db.get_osd_journal() - for _data in db.get_osds(): - osd_id = _data['osd'] - osd_uuid = _data['uuid'] - osd_up = _data['up'] - osd_in = _data['in'] - if not osd_in: - continue - osd_weight = _data['weight'] - osd_public_addr = _data['public_addr'] - osd_cluster_addr = _data['cluster_addr'] - osd_state = _data['state'] - osd_metadata = db.get_osd_metadata(osd_id) - if osd_metadata: - data = DBRelay() - osd_host = osd_metadata['hostname'] - osd_ceph_version = osd_metadata['ceph_version'] - osd_rotational = osd_metadata['rotational'] - osd_devices = osd_metadata['devices'].split(',') - - # filter 'dm' device. 
- devices = [] - for devname in osd_devices: - if 'dm' in devname: - continue - devices.append(devname) - - for hostname in self._host_nodes: - if hostname != osd_host: - continue - - self._osd_nodes[str(osd_id)] = None - host_node = self._host_nodes[hostname] - osd_dev_node = None - for dev_node in ['backend_filestore_dev_node', - 'bluestore_bdev_dev_node']: - val = osd_metadata.get(dev_node) - if val and val.lower() != 'unknown': - osd_dev_node = val - break - - osd_dev_path = None - for dev_path in ['backend_filestore_partition_path', - 'bluestore_bdev_partition_path']: - val = osd_metadata.get(dev_path) - if val and val.lower() != 'unknown': - osd_dev_path = val - break - - dp_osd = MGRDpOsd( - fsid=cluster_id, - host=osd_host, - _id=osd_id, - uuid=osd_uuid, - up=osd_up, - _in=osd_in, - weight=osd_weight, - public_addr=osd_public_addr, - cluster_addr=osd_cluster_addr, - state=','.join(osd_state), - backend_filestore_dev_node=osd_dev_node, - backend_filestore_partition_path=osd_dev_path, - ceph_release=osd_ceph_version, - osd_data=osd_data, - osd_journal=osd_journal, - devices=','.join(devices), - rotational=osd_rotational) - - # create osd node - osd_node = NodeInfo( - label='CephOsd', - domain_id='{}.osd.{}'.format(cluster_id, osd_id), - name='OSD.{}'.format(osd_id), - meta=dp_osd.__dict__ - ) - # add osd node relationship - cypher_cmd = CypherOP.add_link( - host_node, - osd_node, - 'HostContainsOsd' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self._osd_nodes[str(osd_id)] = osd_node - self.data.append(data) - - def _host_contains_mds(self): - cluster_id = self._cluster_id - - db = ClusterAPI(self._module_inst) - file_systems = db.get_file_systems() - - for _data in file_systems: - mds_info = _data.get('mdsmap').get('info') - for _gid in mds_info: - mds_data = mds_info[_gid] - mds_addr = mds_data.get('addr').split(':')[0] - mds_host = mds_data.get('name') - mds_gid = mds_data.get('gid') - - for hostname in self._host_nodes: - if hostname != mds_host: - continue - - data = DBRelay() - host_node = self._host_nodes[hostname] - dp_mds = MGRDpMds( - fsid=cluster_id, - host=mds_host, - ipaddr=mds_addr - ) - - # create osd node - mds_node = NodeInfo( - label='CephMds', - domain_id='{}.mds.{}'.format(cluster_id, mds_gid), - name='MDS.{}'.format(mds_gid), - meta=dp_mds.__dict__ - ) - # add osd node relationship - cypher_cmd = CypherOP.add_link( - host_node, - mds_node, - 'HostContainsMds' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - - def _osd_contains_pg(self): - cluster_id = self._cluster_id - db = ClusterAPI(self._module_inst) - - pg_stats = db.get_pg_stats() - for osd_data in db.get_osds(): - osd_id = osd_data['osd'] - if not osd_data.get('in'): - continue - for _data in pg_stats: - state = _data.get('state') - up = _data.get('up') - acting = _data.get('acting') - pgid = _data.get('pgid') - stat_sum = _data.get('stat_sum', {}) - num_objects = stat_sum.get('num_objects') - num_objects_degraded = stat_sum.get('num_objects_degraded') - num_objects_misplaced = stat_sum.get('num_objects_misplaced') - num_objects_unfound = stat_sum.get('num_objects_unfound') 
- if osd_id in up: - if str(osd_id) not in self._osd_nodes: - continue - osd_node = self._osd_nodes[str(osd_id)] - data = DBRelay() - dp_pg = MGRDpPG( - fsid=cluster_id, - pgid=pgid, - up_osds=','.join(str(x) for x in up), - acting_osds=','.join(str(x) for x in acting), - state=state, - objects=num_objects, - degraded=num_objects_degraded, - misplaced=num_objects_misplaced, - unfound=num_objects_unfound - ) - - # create pg node - pg_node = NodeInfo( - label='CephPG', - domain_id='{}.pg.{}'.format(cluster_id, pgid), - name='PG.{}'.format(pgid), - meta=dp_pg.__dict__ - ) - - # add pg node relationship - cypher_cmd = CypherOP.add_link( - osd_node, - pg_node, - 'OsdContainsPg' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - - def _osd_contains_disk(self): - cluster_id = self._cluster_id - db = ClusterAPI(self._module_inst) - - osd_metadata = db.get_osd_metadata() - for osd_id in osd_metadata: - osds_smart = db.get_osd_smart(osd_id) - if not osds_smart: - continue - - if str(osd_id) not in self._osd_nodes: - continue - - hostname = db.get_osd_hostname(osd_id) - osd_node = self._osd_nodes[str(osd_id)] - for dev_name, s_val in osds_smart.iteritems(): - data = DBRelay() - disk_domain_id = str(dev_name) - try: - if isinstance(s_val.get('user_capacity'), dict): - user_capacity = \ - s_val['user_capacity'].get('bytes', {}).get('n', 0) - else: - user_capacity = s_val.get('user_capacity', 0) - except ValueError: - user_capacity = 0 - dp_disk = MGRDpDisk( - host_domain_id='{}_{}'.format(cluster_id, hostname), - model=s_val.get('model_name', ''), - size=get_human_readable( - int(user_capacity), 0) - ) - - # create disk node - disk_node = NodeInfo( - label='VMDisk', - domain_id=disk_domain_id, - name=dev_name, - meta=dp_disk.__dict__ - ) - - # add disk node relationship - cypher_cmd = CypherOP.add_link( - osd_node, - disk_node, - 'DiskOfOsd' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - - # host node and disk node relationship - data = DBRelay() - host_node = NodeInfo( - label='VMHost', - domain_id='{}_{}'.format(cluster_id, hostname), - name=hostname, - meta={} - ) - - # add osd node relationship - cypher_cmd = CypherOP.add_link( - host_node, - disk_node, - 'VmHostContainsVmDisk' - ) - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - self.data.append(data) - - def _rbd_contains_pg(self): - cluster_id = self._cluster_id - db = ClusterAPI(self._module_inst) - - pg_stats = db.get_pg_stats() - pools = db.get_osd_pools() - for pool_data in pools: - pool_name = pool_data.get('pool_name') - rbd_list = db.get_rbd_list(pool_name=pool_name) - for rbd_data in rbd_list: - image_name = rbd_data.get('name') - # Sometimes get stuck on query rbd objects info - rbd_info = db.get_rbd_info(pool_name, image_name) - rbd_id = rbd_info.get('id') - rbd_size = rbd_info.get('size') - rbd_pgids = rbd_info.get('pgs', []) - - pgids = [] - for _data in rbd_pgids: - pgid = _data.get('pgid') - 
if pgid: - pgids.append(pgid) - - # RBD info - dp_rbd = MGRDpRBD( - fsid=cluster_id, - _id=rbd_id, - name=image_name, - pool_name=pool_name, - size=rbd_size, - pgids=','.join(pgids) - ) - - # create rbd node - rbd_node = NodeInfo( - label='CephRBD', - domain_id='{}.rbd.{}'.format(cluster_id, image_name), - name=image_name, - meta=dp_rbd.__dict__ - ) - - for _data in pg_stats: - pgid = _data.get('pgid') - if pgid not in pgids: - continue - - state = _data.get('state') - up = _data.get('up') - acting = _data.get('acting') - stat_sum = _data.get('stat_sum', {}) - num_objects = stat_sum.get('num_objects') - num_objects_degraded = stat_sum.get('num_objects_degraded') - num_objects_misplaced = stat_sum.get('num_objects_misplaced') - num_objects_unfound = stat_sum.get('num_objects_unfound') - - data = DBRelay() - dp_pg = MGRDpPG( - fsid=cluster_id, - pgid=pgid, - up_osds=','.join(str(x) for x in up), - acting_osds=','.join(str(x) for x in acting), - state=state, - objects=num_objects, - degraded=num_objects_degraded, - misplaced=num_objects_misplaced, - unfound=num_objects_unfound - ) - - # create pg node - pg_node = NodeInfo( - label='CephPG', - domain_id='{}.pg.{}'.format(cluster_id, pgid), - name='PG.{}'.format(pgid), - meta=dp_pg.__dict__ - ) - - # add rbd node relationship - cypher_cmd = CypherOP.add_link( - rbd_node, - pg_node, - 'RbdContainsPg' - ) - cluster_host = socket.gethostname() - data.fields['agenthost'] = cluster_host - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['host'] = cluster_host - data.fields['cmd'] = str(cypher_cmd) - self.data.append(data) - - def _collect_data(self): - if not self._module_inst: - return - - self._cluster_contains_host() - self._host_contains_osd() - self._host_contains_mon() - self._host_contains_mds() - self._osd_contains_pg() - self._osd_contains_disk() diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py deleted file mode 100644 index 63c8e870f71..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py +++ /dev/null @@ -1,71 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -from __future__ import absolute_import - -import socket -import time - -from . 
import AGENT_VERSION, MetricsAgent, MetricsField -from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING -from ...common.clusterdata import ClusterAPI - - -class SAIAgentFields(MetricsField): - """ SAI DiskSmart structure """ - measurement = 'sai_agent' - - def __init__(self): - super(SAIAgentFields, self).__init__() - self.tags['agenthost_domain_id'] = None - self.fields['agent_type'] = str('ceph') - self.fields['agent_version'] = str(AGENT_VERSION) - self.fields['agenthost'] = '' - self.fields['cluster_domain_id'] = '' - self.fields['heartbeat_interval'] = '' - self.fields['host_ip'] = '' - self.fields['host_name'] = '' - self.fields['is_error'] = False - self.fields['is_ceph_error'] = False - self.fields['needs_warning'] = False - self.fields['send'] = None - - -class SAIAgent(MetricsAgent): - measurement = 'sai_agent' - - def _collect_data(self): - mgr_id = [] - c_data = SAIAgentFields() - obj_api = ClusterAPI(self._module_inst) - svc_data = obj_api.get_server(socket.gethostname()) - cluster_state = obj_api.get_health_status() - if not svc_data: - raise Exception('unable to get %s service info' % socket.gethostname()) - # Filter mgr id - for s in svc_data.get('services', []): - if s.get('type', '') == 'mgr': - mgr_id.append(s.get('id')) - - for _id in mgr_id: - mgr_meta = obj_api.get_mgr_metadata(_id) - cluster_id = obj_api.get_cluster_id() - c_data.fields['cluster_domain_id'] = str(cluster_id) - c_data.fields['agenthost'] = str(socket.gethostname()) - c_data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, c_data.fields['agenthost'])) - c_data.fields['heartbeat_interval'] = \ - int(obj_api.get_configuration('diskprediction_upload_metrics_interval')) - c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1')) - c_data.fields['host_name'] = str(socket.gethostname()) - if obj_api.module.status.get('status', '') in [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED]: - c_data.fields['is_error'] = bool(True) - else: - c_data.fields['is_error'] = bool(False) - if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']: - c_data.fields['is_ceph_error'] = bool(True) - c_data.fields['needs_warning'] = bool(True) - c_data.fields['is_error'] = bool(True) - c_data.fields['problems'] = str(obj_api.get_health_checks()) - else: - c_data.fields['is_ceph_error'] = bool(False) - c_data.fields['send'] = int(time.time() * 1000) - self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py deleted file mode 100644 index ac9cab9aa9a..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py +++ /dev/null @@ -1,36 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import AGENT_VERSION, MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class SAIClusterFields(MetricsField): - """ SAI Host structure """ - measurement = 'sai_cluster' - - def __init__(self): - super(SAIClusterFields, self).__init__() - self.tags['domain_id'] = None - self.fields['agenthost'] = None - self.fields['agenthost_domain_id'] = None - self.fields['name'] = None - self.fields['agent_version'] = str(AGENT_VERSION) - - -class SAICluserAgent(MetricsAgent): - measurement = 'sai_cluster' - - def _collect_data(self): - c_data = SAIClusterFields() - obj_api = ClusterAPI(self._module_inst) - cluster_id = obj_api.get_cluster_id() - - c_data.tags['domain_id'] = str(cluster_id) - c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname())) - c_data.fields['agenthost'] = str(socket.gethostname()) - c_data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, c_data.fields['agenthost'])) - c_data.fields['name'] = 'ceph mgr plugin' - self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py deleted file mode 100644 index 447b24ba030..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py +++ /dev/null @@ -1,172 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . import AGENT_VERSION, MetricsAgent, MetricsField -from ...common import get_human_readable -from ...common.clusterdata import ClusterAPI - - -class SAIDiskFields(MetricsField): - """ SAI Disk structure """ - measurement = 'sai_disk' - - def __init__(self): - super(SAIDiskFields, self).__init__() - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.tags['disk_domain_id'] = None - self.tags['disk_name'] = None - self.tags['disk_wwn'] = None - self.tags['primary_key'] = None - self.fields['cluster_domain_id'] = None - self.fields['host_domain_id'] = None - self.fields['model'] = None - self.fields['serial_number'] = None - self.fields['size'] = None - self.fields['vendor'] = None - self.fields['agent_version'] = str(AGENT_VERSION) - - """disk_status - 0: unknown 1: good 2: failure - """ - self.fields['disk_status'] = 0 - - """disk_type - 0: unknown 1: HDD 2: SSD 3: SSD NVME - 4: SSD SAS 5: SSD SATA 6: HDD SAS 7: HDD SATA - """ - self.fields['disk_type'] = 0 - - -class SAIDiskAgent(MetricsAgent): - measurement = 'sai_disk' - - @staticmethod - def _convert_disk_type(is_ssd, sata_version, protocol): - """ return type: - 0: "Unknown', 1: 'HDD', - 2: 'SSD", 3: "SSD NVME", - 4: "SSD SAS", 5: "SSD SATA", - 6: "HDD SAS", 7: "HDD SATA" - """ - if is_ssd: - if sata_version and not protocol: - disk_type = 5 - elif 'SCSI'.lower() in protocol.lower(): - disk_type = 4 - elif 'NVMe'.lower() in protocol.lower(): - disk_type = 3 - else: - disk_type = 2 - else: - if sata_version and not protocol: - disk_type = 7 - elif 'SCSI'.lower() in protocol.lower(): - disk_type = 6 - else: - disk_type = 1 - return disk_type - - def _collect_data(self): - # process data and save to 'self.data' - obj_api = ClusterAPI(self._module_inst) - cluster_id = obj_api.get_cluster_id() - osds = obj_api.get_osds() - for osd in osds: - if osd.get('osd') is None: - continue - if not osd.get('in'): - continue - osds_meta = obj_api.get_osd_metadata(osd.get('osd')) - if not osds_meta: - continue - osds_smart = obj_api.get_osd_smart(osd.get('osd')) - if not osds_smart: - continue - for dev_name, s_val in osds_smart.iteritems(): - d_data = SAIDiskFields() - 
d_data.tags['disk_name'] = str(dev_name) - d_data.fields['cluster_domain_id'] = str(cluster_id) - d_data.tags['host_domain_id'] = \ - str('%s_%s' - % (cluster_id, osds_meta.get('hostname', 'None'))) - d_data.fields['agenthost'] = str(socket.gethostname()) - d_data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, d_data.fields['agenthost'])) - serial_number = s_val.get('serial_number') - wwn = s_val.get('wwn', {}) - wwpn = '' - if wwn: - wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0)) - for k in wwn.keys(): - if k in ['naa', 't10', 'eui', 'iqn']: - wwpn = ('%X%s' % (wwn[k], wwpn)).lower() - break - - if wwpn: - d_data.tags['disk_domain_id'] = str(dev_name) - d_data.tags['disk_wwn'] = str(wwpn) - if serial_number: - d_data.fields['serial_number'] = str(serial_number) - else: - d_data.fields['serial_number'] = str(wwpn) - elif serial_number: - d_data.tags['disk_domain_id'] = str(dev_name) - d_data.fields['serial_number'] = str(serial_number) - if wwpn: - d_data.tags['disk_wwn'] = str(wwpn) - else: - d_data.tags['disk_wwn'] = str(serial_number) - else: - d_data.tags['disk_domain_id'] = str(dev_name) - d_data.tags['disk_wwn'] = str(dev_name) - d_data.fields['serial_number'] = str(dev_name) - d_data.tags['primary_key'] = \ - str('%s%s%s' - % (cluster_id, d_data.tags['host_domain_id'], - d_data.tags['disk_domain_id'])) - d_data.fields['disk_status'] = int(1) - is_ssd = True if s_val.get('rotation_rate') == 0 else False - vendor = s_val.get('vendor', None) - model = s_val.get('model_name', None) - if s_val.get('sata_version', {}).get('string'): - sata_version = s_val['sata_version']['string'] - else: - sata_version = '' - if s_val.get('device', {}).get('protocol'): - protocol = s_val['device']['protocol'] - else: - protocol = '' - d_data.fields['disk_type'] = \ - self._convert_disk_type(is_ssd, sata_version, protocol) - d_data.fields['firmware_version'] = \ - str(s_val.get('firmware_version')) - if model: - d_data.fields['model'] = str(model) - if vendor: - d_data.fields['vendor'] = str(vendor) - if sata_version: - d_data.fields['sata_version'] = str(sata_version) - if s_val.get('logical_block_size'): - d_data.fields['sector_size'] = \ - str(str(s_val['logical_block_size'])) - d_data.fields['transport_protocol'] = str('') - d_data.fields['vendor'] = \ - str(s_val.get('model_family', '')).replace('\"', '\'') - try: - if isinstance(s_val.get('user_capacity'), dict): - user_capacity = \ - s_val['user_capacity'].get('bytes', {}).get('n', 0) - else: - user_capacity = s_val.get('user_capacity', 0) - except ValueError: - user_capacity = 0 - d_data.fields['size'] = \ - get_human_readable(int(user_capacity), 0) - - if s_val.get('smart_status', {}).get('passed'): - d_data.fields['smart_health_status'] = 'PASSED' - else: - d_data.fields['smart_health_status'] = 'FAILED' - self.data.append(d_data) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py deleted file mode 100644 index 509d9263aa8..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py +++ /dev/null @@ -1,156 +0,0 @@ -from __future__ import absolute_import - -import datetime -import json -import _strptime -import socket -import time - -from . 
import AGENT_VERSION, MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class SAIDiskSmartFields(MetricsField): - """ SAI DiskSmart structure """ - measurement = 'sai_disk_smart' - - def __init__(self): - super(SAIDiskSmartFields, self).__init__() - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.tags['disk_domain_id'] = None - self.tags['disk_name'] = None - self.tags['disk_wwn'] = None - self.tags['primary_key'] = None - self.fields['cluster_domain_id'] = None - self.fields['host_domain_id'] = None - self.fields['agent_version'] = str(AGENT_VERSION) - - -class SAIDiskSmartAgent(MetricsAgent): - measurement = 'sai_disk_smart' - - def _collect_data(self): - # process data and save to 'self.data' - obj_api = ClusterAPI(self._module_inst) - cluster_id = obj_api.get_cluster_id() - osds = obj_api.get_osds() - for osd in osds: - if osd.get('osd') is None: - continue - if not osd.get('in'): - continue - osds_meta = obj_api.get_osd_metadata(osd.get('osd')) - if not osds_meta: - continue - devs_info = obj_api.get_osd_device_id(osd.get('osd')) - if devs_info: - for dev_name, dev_info in devs_info.iteritems(): - osds_smart = obj_api.get_device_health(dev_info['dev_id']) - if not osds_smart: - continue - # Always pass through last smart data record - o_key = sorted(osds_smart.iterkeys(), reverse=True)[0] - if o_key: - s_date = o_key - s_val = osds_smart[s_date] - smart_data = SAIDiskSmartFields() - smart_data.tags['disk_name'] = str(dev_name) - smart_data.fields['cluster_domain_id'] = str(cluster_id) - smart_data.tags['host_domain_id'] = \ - str('%s_%s' - % (cluster_id, osds_meta.get('hostname', 'None'))) - smart_data.fields['agenthost'] = str(socket.gethostname()) - smart_data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, smart_data.fields['agenthost'])) - # parse attributes - ata_smart = s_val.get('ata_smart_attributes', {}) - for attr in ata_smart.get('table', []): - if attr.get('raw', {}).get('string'): - if str(attr.get('raw', {}).get('string', '0')).isdigit(): - smart_data.fields['%s_raw' % attr.get('id')] = \ - int(attr.get('raw', {}).get('string', '0')) - else: - if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit(): - smart_data.fields['%s_raw' % attr.get('id')] = \ - int(attr.get('raw', {}).get('string', '0').split(' ')[0]) - else: - smart_data.fields['%s_raw' % attr.get('id')] = \ - attr.get('raw', {}).get('value', 0) - smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'")) - if s_val.get('temperature', {}).get('current') is not None: - smart_data.fields['CurrentDriveTemperature_raw'] = \ - int(s_val['temperature']['current']) - if s_val.get('temperature', {}).get('drive_trip') is not None: - smart_data.fields['DriveTripTemperature_raw'] = \ - int(s_val['temperature']['drive_trip']) - if s_val.get('elements_grown_list') is not None: - smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list']) - if s_val.get('power_on_time', {}).get('hours') is not None: - smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours']) - if s_val.get('scsi_percentage_used_endurance_indicator') is not None: - smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \ - int(s_val['scsi_percentage_used_endurance_indicator']) - if s_val.get('scsi_error_counter_log') is not None: - s_err_counter = s_val['scsi_error_counter_log'] - for s_key in s_err_counter.keys(): - if s_key.lower() in ['read', 'write']: - for s1_key in s_err_counter[s_key].keys(): - if 
s1_key.lower() == 'errors_corrected_by_eccfast': - smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['errors_corrected_by_eccfast']) - elif s1_key.lower() == 'errors_corrected_by_eccdelayed': - smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['errors_corrected_by_eccdelayed']) - elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites': - smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites']) - elif s1_key.lower() == 'total_errors_corrected': - smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['total_errors_corrected']) - elif s1_key.lower() == 'correction_algorithm_invocations': - smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['correction_algorithm_invocations']) - elif s1_key.lower() == 'gigabytes_processed': - smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \ - float(s_err_counter[s_key]['gigabytes_processed']) - elif s1_key.lower() == 'total_uncorrected_errors': - smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \ - int(s_err_counter[s_key]['total_uncorrected_errors']) - - serial_number = s_val.get('serial_number') - wwn = s_val.get('wwn', {}) - wwpn = '' - if wwn: - wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0)) - for k in wwn.keys(): - if k in ['naa', 't10', 'eui', 'iqn']: - wwpn = ('%X%s' % (wwn[k], wwpn)).lower() - break - if wwpn: - smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) - smart_data.tags['disk_wwn'] = str(wwpn) - if serial_number: - smart_data.fields['serial_number'] = str(serial_number) - else: - smart_data.fields['serial_number'] = str(wwpn) - elif serial_number: - smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) - smart_data.fields['serial_number'] = str(serial_number) - if wwpn: - smart_data.tags['disk_wwn'] = str(wwpn) - else: - smart_data.tags['disk_wwn'] = str(serial_number) - else: - smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) - smart_data.tags['disk_wwn'] = str(dev_name) - smart_data.fields['serial_number'] = str(dev_name) - smart_data.tags['primary_key'] = \ - str('%s%s%s' - % (cluster_id, - smart_data.tags['host_domain_id'], - smart_data.tags['disk_domain_id'])) - smart_data.timestamp = \ - time.mktime(datetime.datetime.strptime( - s_date, '%Y%m%d-%H%M%S').timetuple()) - self.data.append(smart_data) diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_host.py deleted file mode 100644 index f3fc8ba257a..00000000000 --- a/src/pybind/mgr/diskprediction/agent/metrics/sai_host.py +++ /dev/null @@ -1,105 +0,0 @@ -from __future__ import absolute_import - -import socket - -from . 
import AGENT_VERSION, MetricsAgent, MetricsField -from ...common.clusterdata import ClusterAPI - - -class SAIHostFields(MetricsField): - """ SAI Host structure """ - measurement = 'sai_host' - - def __init__(self): - super(SAIHostFields, self).__init__() - self.tags['domain_id'] = None - self.fields['agenthost'] = None - self.tags['agenthost_domain_id'] = None - self.fields['cluster_domain_id'] = None - self.fields['name'] = None - self.fields['host_ip'] = None - self.fields['host_ipv6'] = None - self.fields['host_uuid'] = None - self.fields['os_type'] = str('ceph') - self.fields['os_name'] = None - self.fields['os_version'] = None - self.fields['agent_version'] = str(AGENT_VERSION) - - -class SAIHostAgent(MetricsAgent): - measurement = 'sai_host' - - def _collect_data(self): - db = ClusterAPI(self._module_inst) - cluster_id = db.get_cluster_id() - - hosts = set() - - osd_data = db.get_osds() - for _data in osd_data: - osd_id = _data['osd'] - if not _data.get('in'): - continue - osd_addr = _data['public_addr'].split(':')[0] - osd_metadata = db.get_osd_metadata(osd_id) - if osd_metadata: - osd_host = osd_metadata.get('hostname', 'None') - if osd_host not in hosts: - data = SAIHostFields() - data.fields['agenthost'] = str(socket.gethostname()) - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['domain_id'] = \ - str('%s_%s' % (cluster_id, osd_host)) - data.fields['cluster_domain_id'] = str(cluster_id) - data.fields['host_ip'] = osd_addr - data.fields['host_uuid'] = \ - str('%s_%s' % (cluster_id, osd_host)) - data.fields['os_name'] = \ - osd_metadata.get('ceph_release', '') - data.fields['os_version'] = \ - osd_metadata.get('ceph_version_short', '') - data.fields['name'] = osd_host - hosts.add(osd_host) - self.data.append(data) - - mons = db.get_mons() - for _data in mons: - mon_host = _data['name'] - mon_addr = _data['public_addr'].split(':')[0] - if mon_host not in hosts: - data = SAIHostFields() - data.fields['agenthost'] = str(socket.gethostname()) - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['domain_id'] = \ - str('%s_%s' % (cluster_id, mon_host)) - data.fields['cluster_domain_id'] = str(cluster_id) - data.fields['host_ip'] = mon_addr - data.fields['host_uuid'] = \ - str('%s_%s' % (cluster_id, mon_host)) - data.fields['name'] = mon_host - hosts.add((mon_host, mon_addr)) - self.data.append(data) - - file_systems = db.get_file_systems() - for _data in file_systems: - mds_info = _data.get('mdsmap').get('info') - for _gid in mds_info: - mds_data = mds_info[_gid] - mds_addr = mds_data.get('addr').split(':')[0] - mds_host = mds_data.get('name') - if mds_host not in hosts: - data = SAIHostFields() - data.fields['agenthost'] = str(socket.gethostname()) - data.tags['agenthost_domain_id'] = \ - str('%s_%s' % (cluster_id, data.fields['agenthost'])) - data.tags['domain_id'] = \ - str('%s_%s' % (cluster_id, mds_host)) - data.fields['cluster_domain_id'] = str(cluster_id) - data.fields['host_ip'] = mds_addr - data.fields['host_uuid'] = \ - str('%s_%s' % (cluster_id, mds_host)) - data.fields['name'] = mds_host - hosts.add((mds_host, mds_addr)) - self.data.append(data) diff --git a/src/pybind/mgr/diskprediction/agent/predict/__init__.py b/src/pybind/mgr/diskprediction/agent/predict/__init__.py deleted file mode 100644 index 28e2eebd2b7..00000000000 --- a/src/pybind/mgr/diskprediction/agent/predict/__init__.py +++ /dev/null @@ -1 +0,0 @@ -from __future__ import absolute_import diff --git 
a/src/pybind/mgr/diskprediction/agent/predict/prediction.py b/src/pybind/mgr/diskprediction/agent/predict/prediction.py deleted file mode 100644 index 27ba3335422..00000000000 --- a/src/pybind/mgr/diskprediction/agent/predict/prediction.py +++ /dev/null @@ -1,216 +0,0 @@ -from __future__ import absolute_import -import datetime - -from .. import BaseAgent -from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK -from ...common.clusterdata import ClusterAPI - -PREDICTION_FILE = '/var/tmp/disk_prediction.json' - -TIME_DAYS = 24*60*60 -TIME_WEEK = TIME_DAYS * 7 - - -class PredictionAgent(BaseAgent): - - measurement = 'sai_disk_prediction' - - @staticmethod - def _get_disk_type(is_ssd, vendor, model): - """ return type: - 0: "Unknown", 1: "HDD", - 2: "SSD", 3: "SSD NVME", - 4: "SSD SAS", 5: "SSD SATA", - 6: "HDD SAS", 7: "HDD SATA" - """ - if is_ssd: - if vendor: - disk_type = 4 - elif model: - disk_type = 5 - else: - disk_type = 2 - else: - if vendor: - disk_type = 6 - elif model: - disk_type = 7 - else: - disk_type = 1 - return disk_type - - def _store_prediction_result(self, result): - self._module_inst._prediction_result = result - - def _parse_prediction_data(self, host_domain_id, disk_domain_id): - result = {} - try: - query_info = self._client.query_info( - host_domain_id, disk_domain_id, 'sai_disk_prediction') - status_code = query_info.status_code - if status_code == 200: - result = query_info.json() - self._module_inst.status = {'status': DP_MGR_STAT_OK} - else: - resp = query_info.json() - if resp.get('error'): - self._logger.error(str(resp['error'])) - self._module_inst.status = \ - {'status': DP_MGR_STAT_FAILED, - 'reason': 'failed to parse device {} prediction data'.format(disk_domain_id)} - except Exception as e: - self._logger.error(str(e)) - return result - - @staticmethod - def _convert_timestamp(predicted_timestamp, life_expectancy_day): - """ - - :param predicted_timestamp: unit is nanoseconds - :param life_expectancy_day: unit is seconds - :return: - date format '%Y-%m-%d' ex. 
2018-01-01 - """ - return datetime.datetime.fromtimestamp( - predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d') - - def _fetch_prediction_result(self): - obj_api = ClusterAPI(self._module_inst) - cluster_id = obj_api.get_cluster_id() - - result = {} - osds = obj_api.get_osds() - for osd in osds: - osd_id = osd.get('osd') - if osd_id is None: - continue - if not osd.get('in'): - continue - osds_meta = obj_api.get_osd_metadata(osd_id) - if not osds_meta: - continue - osds_smart = obj_api.get_osd_smart(osd_id) - if not osds_smart: - continue - - hostname = osds_meta.get('hostname', 'None') - host_domain_id = '%s_%s' % (cluster_id, hostname) - - for dev_name, s_val in osds_smart.iteritems(): - is_ssd = True if s_val.get('rotation_rate') == 0 else False - vendor = s_val.get('vendor', '') - model = s_val.get('model_name', '') - disk_type = self._get_disk_type(is_ssd, vendor, model) - serial_number = s_val.get('serial_number') - wwn = s_val.get('wwn', {}) - wwpn = '' - if wwn: - wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0)) - for k in wwn.keys(): - if k in ['naa', 't10', 'eui', 'iqn']: - wwpn = ('%X%s' % (wwn[k], wwpn)).lower() - break - - tmp = {} - if wwpn: - tmp['disk_domain_id'] = dev_name - tmp['disk_wwn'] = wwpn - if serial_number: - tmp['serial_number'] = serial_number - else: - tmp['serial_number'] = wwpn - elif serial_number: - tmp['disk_domain_id'] = dev_name - tmp['serial_number'] = serial_number - if wwpn: - tmp['disk_wwn'] = wwpn - else: - tmp['disk_wwn'] = serial_number - else: - tmp['disk_domain_id'] = dev_name - tmp['disk_wwn'] = dev_name - tmp['serial_number'] = dev_name - - if s_val.get('smart_status', {}).get('passed'): - tmp['smart_health_status'] = 'PASSED' - else: - tmp['smart_health_status'] = 'FAILED' - - tmp['sata_version'] = s_val.get('sata_version', {}).get('string', '') - tmp['sector_size'] = str(s_val.get('logical_block_size', '')) - try: - if isinstance(s_val.get('user_capacity'), dict): - user_capacity = \ - s_val['user_capacity'].get('bytes', {}).get('n', 0) - else: - user_capacity = s_val.get('user_capacity', 0) - except ValueError: - user_capacity = 0 - disk_info = { - 'disk_name': dev_name, - 'disk_type': str(disk_type), - 'disk_status': '1', - 'disk_wwn': tmp['disk_wwn'], - 'dp_disk_idd': tmp['disk_domain_id'], - 'serial_number': tmp['serial_number'], - 'vendor': vendor, - 'sata_version': tmp['sata_version'], - 'smart_healthStatus': tmp['smart_health_status'], - 'sector_size': tmp['sector_size'], - 'size': str(user_capacity), - 'prediction': self._parse_prediction_data( - host_domain_id, tmp['disk_domain_id']) - } - # Update osd life-expectancy - predicted = None - life_expectancy_day_min = None - life_expectancy_day_max = None - devs_info = obj_api.get_osd_device_id(osd_id) - if disk_info.get('prediction', {}).get('predicted'): - predicted = int(disk_info['prediction']['predicted']) - if disk_info.get('prediction', {}).get('near_failure'): - if disk_info['prediction']['near_failure'].lower() == 'good': - life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS - life_expectancy_day_max = None - elif disk_info['prediction']['near_failure'].lower() == 'warning': - life_expectancy_day_min = (TIME_WEEK * 2) - life_expectancy_day_max = (TIME_WEEK * 6) - elif disk_info['prediction']['near_failure'].lower() == 'bad': - life_expectancy_day_min = 0 - life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS - else: - # Near failure state is unknown. 
- predicted = None - life_expectancy_day_min = None - life_expectancy_day_max = None - - if predicted and tmp['disk_domain_id'] and life_expectancy_day_min: - from_date = None - to_date = None - try: - if life_expectancy_day_min: - from_date = self._convert_timestamp(predicted, life_expectancy_day_min) - - if life_expectancy_day_max: - to_date = self._convert_timestamp(predicted, life_expectancy_day_max) - - obj_api.set_device_life_expectancy(tmp['disk_domain_id'], from_date, to_date) - self._logger.info( - 'succeed to set device {} life expectancy from: {}, to: {}'.format( - tmp['disk_domain_id'], from_date, to_date)) - except Exception as e: - self._logger.error( - 'failed to set device {} life expectancy from: {}, to: {}, {}'.format( - tmp['disk_domain_id'], from_date, to_date, str(e))) - else: - if tmp['disk_domain_id']: - obj_api.reset_device_life_expectancy(tmp['disk_domain_id']) - if tmp['disk_domain_id']: - result[tmp['disk_domain_id']] = disk_info - - return result - - def run(self): - result = self._fetch_prediction_result() - if result: - self._store_prediction_result(result) diff --git a/src/pybind/mgr/diskprediction/common/__init__.py b/src/pybind/mgr/diskprediction/common/__init__.py deleted file mode 100644 index cbc3c30ebf6..00000000000 --- a/src/pybind/mgr/diskprediction/common/__init__.py +++ /dev/null @@ -1,59 +0,0 @@ -from __future__ import absolute_import -import errno -from functools import wraps -from six.moves.http_client import BAD_REQUEST -import os -import signal - - -DP_MGR_STAT_OK = 'OK' -DP_MGR_STAT_WARNING = 'WARNING' -DP_MGR_STAT_FAILED = 'FAILED' -DP_MGR_STAT_DISABLED = 'DISABLED' -DP_MGR_STAT_ENABLED = 'ENABLED' - - -class DummyResonse: - def __init__(self): - self.resp_json = dict() - self.content = 'DummyResponse' - self.status_code = BAD_REQUEST - - def json(self): - return self.resp_json - - -class TimeoutError(Exception): - pass - - -def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): - def decorator(func): - def _handle_timeout(signum, frame): - raise TimeoutError(error_message) - - def wrapper(*args, **kwargs): - if hasattr(args[0], '_timeout') is not None: - seconds = args[0]._timeout - signal.signal(signal.SIGALRM, _handle_timeout) - signal.alarm(seconds) - try: - result = func(*args, **kwargs) - finally: - signal.alarm(0) - return result - - return wraps(func)(wrapper) - - return decorator - - -def get_human_readable(size, precision=2): - suffixes = ['B', 'KB', 'MB', 'GB', 'TB'] - suffix_index = 0 - while size > 1000 and suffix_index < 4: - # increment the index of the suffix - suffix_index += 1 - # apply the division - size = size/1000.0 - return '%.*d %s' % (precision, size, suffixes[suffix_index]) diff --git a/src/pybind/mgr/diskprediction/common/client_pb2.py b/src/pybind/mgr/diskprediction/common/client_pb2.py deleted file mode 100644 index 9f65c731a84..00000000000 --- a/src/pybind/mgr/diskprediction/common/client_pb2.py +++ /dev/null @@ -1,1775 +0,0 @@ -# Generated by the protocol buffer compiler. DO NOT EDIT! 
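# A minimal, self-contained sketch (not part of the deleted module) of
# the two helpers removed from common/__init__.py above, with two fixes:
# the original tests `hasattr(args[0], '_timeout') is not None`, which
# is always true because hasattr returns a bool, and get_human_readable
# formats with '%.*d', which zero-pads an integer instead of printing
# `precision` decimal places. Names are kept from the original, but this
# version raises the built-in TimeoutError rather than the module's own
# class; the SIGALRM approach is Unix-only and assumes the decorated
# call runs on the main thread.
import errno
import os
import signal
from functools import wraps


def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):
    def decorator(func):
        def _handle_timeout(signum, frame):
            raise TimeoutError(error_message)

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Prefer the instance's own _timeout when the wrapped
            # callable is a method on an object that defines one.
            wait = seconds
            if args and hasattr(args[0], '_timeout'):
                wait = args[0]._timeout
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(int(wait))  # alarm takes whole seconds
            try:
                return func(*args, **kwargs)
            finally:
                signal.alarm(0)  # always clear the pending alarm
        return wrapper
    return decorator


def get_human_readable(size, precision=2):
    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']
    suffix_index = 0
    while size > 1000 and suffix_index < 4:
        suffix_index += 1
        size = size / 1000.0
    return '%.*f %s' % (precision, size, suffixes[suffix_index])


# e.g. get_human_readable(1234567) -> '1.23 MB'; a @timeout(5)-decorated
# call raises TimeoutError after five seconds.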
-# source: mainServer.proto - -import sys -_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) -from google.protobuf import descriptor as _descriptor -from google.protobuf import message as _message -from google.protobuf import reflection as _reflection -from google.protobuf import symbol_database as _symbol_database -from google.protobuf import descriptor_pb2 -# @@protoc_insertion_point(imports) - -_sym_db = _symbol_database.Default() - - -from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 - - -DESCRIPTOR = _descriptor.FileDescriptor( - name='mainServer.proto', - package='proto', - syntax='proto3', - serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 \x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 
\x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 .proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a 
.proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3') - , - dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,]) - - - -_PERSON_PHONETYPE = _descriptor.EnumDescriptor( - name='PhoneType', - full_name='proto.Person.PhoneType', - filename=None, - file=DESCRIPTOR, - values=[ - _descriptor.EnumValueDescriptor( - name='MOBILE', index=0, number=0, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='HOME', index=1, number=1, - options=None, - type=None), - _descriptor.EnumValueDescriptor( - name='WORK', index=2, number=2, - options=None, - type=None), - ], - containing_type=None, - options=None, - serialized_start=579, - serialized_end=622, -) -_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE) - - -_EMPTY = _descriptor.Descriptor( - name='Empty', - full_name='proto.Empty', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=57, - serialized_end=64, -) - - -_GENERALMSGOUTPUT = _descriptor.Descriptor( - name='GeneralMsgOutput', - full_name='proto.GeneralMsgOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.GeneralMsgOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=66, - serialized_end=101, -) - - -_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor( - name='GeneralHeartbeatOutput', - full_name='proto.GeneralHeartbeatOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=103, - serialized_end=144, -) - - -_PINGOUTOUT = _descriptor.Descriptor( - name='PingOutout', - full_name='proto.PingOutout', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.PingOutout.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - 
is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=146, - serialized_end=175, -) - - -_TESTINPUT = _descriptor.Descriptor( - name='TestInput', - full_name='proto.TestInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='people', full_name='proto.TestInput.people', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=177, - serialized_end=219, -) - - -_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor( - name='MapValueEntry', - full_name='proto.TestOutput.MapValueEntry', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=365, - serialized_end=412, -) - -_TESTOUTPUT = _descriptor.Descriptor( - name='TestOutput', - full_name='proto.TestOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='strArray', full_name='proto.TestOutput.strArray', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='mapValue', full_name='proto.TestOutput.mapValue', index=1, - number=2, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='pn', full_name='proto.TestOutput.pn', index=2, - number=4, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='profile', full_name='proto.TestOutput.profile', index=3, - number=3, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ], - enum_types=[ - ], - options=None, - 
is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=222, - serialized_end=412, -) - - -_PERSON_PHONENUMBER = _descriptor.Descriptor( - name='PhoneNumber', - full_name='proto.Person.PhoneNumber', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='number', full_name='proto.Person.PhoneNumber.number', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='type', full_name='proto.Person.PhoneNumber.type', index=1, - number=2, type=14, cpp_type=8, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=509, - serialized_end=577, -) - -_PERSON = _descriptor.Descriptor( - name='Person', - full_name='proto.Person', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='proto.Person.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='id', full_name='proto.Person.id', index=1, - number=2, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='email', full_name='proto.Person.email', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='phones', full_name='proto.Person.phones', index=3, - number=4, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_PERSON_PHONENUMBER, ], - enum_types=[ - _PERSON_PHONETYPE, - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=415, - serialized_end=622, -) - - -_PROFILE_FILE = _descriptor.Descriptor( - name='File', - full_name='proto.Profile.File', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='name', full_name='proto.Profile.File.name', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1, - number=2, type=5, cpp_type=1, label=1, - has_default_value=False, 
default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3, - number=4, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4, - number=5, type=1, cpp_type=5, label=1, - has_default_value=False, default_value=float(0), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5, - number=6, type=8, cpp_type=7, label=1, - has_default_value=False, default_value=False, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=675, - serialized_end=794, -) - -_PROFILE = _descriptor.Descriptor( - name='Profile', - full_name='proto.Profile', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='fileInfo', full_name='proto.Profile.fileInfo', index=0, - number=1, type=11, cpp_type=10, label=1, - has_default_value=False, default_value=None, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[_PROFILE_FILE, ], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=625, - serialized_end=794, -) - - -_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor( - name='GetUsersByStatusInput', - full_name='proto.GetUsersByStatusInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='status', full_name='proto.GetUsersByStatusInput.status', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='key', full_name='proto.GetUsersByStatusInput.key', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=796, - serialized_end=848, -) - - -_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor( - 
name='GetUsersByStatusOutput', - full_name='proto.GetUsersByStatusOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='users', full_name='proto.GetUsersByStatusOutput.users', index=0, - number=1, type=11, cpp_type=10, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=850, - serialized_end=908, -) - - -_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor( - name='AccountHeartbeatOutput', - full_name='proto.AccountHeartbeatOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.AccountHeartbeatOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=910, - serialized_end=951, -) - - -_LOGININPUT = _descriptor.Descriptor( - name='LoginInput', - full_name='proto.LoginInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='email', full_name='proto.LoginInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='password', full_name='proto.LoginInput.password', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=953, - serialized_end=998, -) - - -_USEROUTPUT = _descriptor.Descriptor( - name='UserOutput', - full_name='proto.UserOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='id', full_name='proto.UserOutput.id', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='email', full_name='proto.UserOutput.email', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='status', full_name='proto.UserOutput.status', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, 
enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='phone', full_name='proto.UserOutput.phone', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='firstName', full_name='proto.UserOutput.firstName', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lastName', full_name='proto.UserOutput.lastName', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='createdTime', full_name='proto.UserOutput.createdTime', index=6, - number=7, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='namespace', full_name='proto.UserOutput.namespace', index=7, - number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='domainName', full_name='proto.UserOutput.domainName', index=8, - number=9, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='company', full_name='proto.UserOutput.company', index=9, - number=10, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='url', full_name='proto.UserOutput.url', index=10, - number=11, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11, - number=12, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12, - number=13, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - 
extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1001, - serialized_end=1243, -) - - -_SINGUPINPUT = _descriptor.Descriptor( - name='SingupInput', - full_name='proto.SingupInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='email', full_name='proto.SingupInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='phone', full_name='proto.SingupInput.phone', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='firstName', full_name='proto.SingupInput.firstName', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='lastName', full_name='proto.SingupInput.lastName', index=3, - number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='password', full_name='proto.SingupInput.password', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='company', full_name='proto.SingupInput.company', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1245, - serialized_end=1360, -) - - -_SINGUPOUTPUT = _descriptor.Descriptor( - name='SingupOutput', - full_name='proto.SingupOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.SingupOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1362, - serialized_end=1393, -) - - -_DELETEUSERINPUT = _descriptor.Descriptor( - name='DeleteUserInput', - full_name='proto.DeleteUserInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - 
_descriptor.FieldDescriptor( - name='email', full_name='proto.DeleteUserInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='key', full_name='proto.DeleteUserInput.key', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1395, - serialized_end=1440, -) - - -_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor( - name='UpdateUserStatusInput', - full_name='proto.UpdateUserStatusInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='email', full_name='proto.UpdateUserStatusInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='key', full_name='proto.UpdateUserStatusInput.key', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='status', full_name='proto.UpdateUserStatusInput.status', index=2, - number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1442, - serialized_end=1509, -) - - -_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor( - name='ResendConfirmCodeInput', - full_name='proto.ResendConfirmCodeInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='email', full_name='proto.ResendConfirmCodeInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1511, - serialized_end=1550, -) - - -_CONFIRMINPUT = _descriptor.Descriptor( - name='ConfirmInput', - full_name='proto.ConfirmInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='email', full_name='proto.ConfirmInput.email', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='code', full_name='proto.ConfirmInput.code', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1552, - serialized_end=1595, -) - - -_DPHEARTBEATOUTPUT = _descriptor.Descriptor( - name='DPHeartbeatOutput', - full_name='proto.DPHeartbeatOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.DPHeartbeatOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1597, - serialized_end=1633, -) - - -_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor( - name='DPGetPhysicalDisksInput', - full_name='proto.DPGetPhysicalDisksInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, 
extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1635, - serialized_end=1745, -) - - -_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor( - name='DPGetDisksPredictionInput', - full_name='proto.DPGetDisksPredictionInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2, - number=3, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3, - number=4, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4, - number=5, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5, - number=6, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1747, - serialized_end=1870, -) - - -_DPBINARYOUTPUT = _descriptor.Descriptor( - name='DPBinaryOutput', - full_name='proto.DPBinaryOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='data', full_name='proto.DPBinaryOutput.data', index=0, - number=1, type=12, cpp_type=9, label=1, - has_default_value=False, default_value=_b(""), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1872, - serialized_end=1902, -) - - -_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor( - 
name='CollectionHeartbeatOutput', - full_name='proto.CollectionHeartbeatOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0, - number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1904, - serialized_end=1948, -) - - -_POSTMETRICSINPUT = _descriptor.Descriptor( - name='PostMetricsInput', - full_name='proto.PostMetricsInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='points', full_name='proto.PostMetricsInput.points', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1950, - serialized_end=1984, -) - - -_POSTDBRELAYINPUT = _descriptor.Descriptor( - name='PostDBRelayInput', - full_name='proto.PostDBRelayInput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0, - number=1, type=9, cpp_type=9, label=3, - has_default_value=False, default_value=[], - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=1986, - serialized_end=2018, -) - - -_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor( - name='CollectionMessageOutput', - full_name='proto.CollectionMessageOutput', - filename=None, - file=DESCRIPTOR, - containing_type=None, - fields=[ - _descriptor.FieldDescriptor( - name='status', full_name='proto.CollectionMessageOutput.status', index=0, - number=1, type=3, cpp_type=2, label=1, - has_default_value=False, default_value=0, - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - _descriptor.FieldDescriptor( - name='message', full_name='proto.CollectionMessageOutput.message', index=1, - number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=_b("").decode('utf-8'), - message_type=None, enum_type=None, containing_type=None, - is_extension=False, extension_scope=None, - options=None, file=DESCRIPTOR), - ], - extensions=[ - ], - nested_types=[], - enum_types=[ - ], - options=None, - is_extendable=False, - syntax='proto3', - extension_ranges=[], - oneofs=[ - ], - serialized_start=2020, - serialized_end=2078, -) - -_TESTINPUT.fields_by_name['people'].message_type = _PERSON -_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT -_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY -_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON 
-_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE -_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE -_PERSON_PHONENUMBER.containing_type = _PERSON -_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER -_PERSON_PHONETYPE.containing_type = _PERSON -_PROFILE_FILE.containing_type = _PROFILE -_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE -_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT -DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY -DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT -DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT -DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT -DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT -DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT -DESCRIPTOR.message_types_by_name['Person'] = _PERSON -DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE -DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT -DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT -DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT -DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT -DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT -DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT -DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT -DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT -DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT -DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT -DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT -DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT -DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT -DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT -DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT -DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT -DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT -DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT -DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT -_sym_db.RegisterFileDescriptor(DESCRIPTOR) - -Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( - DESCRIPTOR = _EMPTY, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.Empty) - )) -_sym_db.RegisterMessage(Empty) - -GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict( - DESCRIPTOR = _GENERALMSGOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput) - )) -_sym_db.RegisterMessage(GeneralMsgOutput) - -GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict( - DESCRIPTOR = _GENERALHEARTBEATOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput) - )) -_sym_db.RegisterMessage(GeneralHeartbeatOutput) - -PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict( - DESCRIPTOR = _PINGOUTOUT, - __module__ = 'mainServer_pb2' - # 
@@protoc_insertion_point(class_scope:proto.PingOutout) - )) -_sym_db.RegisterMessage(PingOutout) - -TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict( - DESCRIPTOR = _TESTINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.TestInput) - )) -_sym_db.RegisterMessage(TestInput) - -TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict( - - MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict( - DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry) - )) - , - DESCRIPTOR = _TESTOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.TestOutput) - )) -_sym_db.RegisterMessage(TestOutput) -_sym_db.RegisterMessage(TestOutput.MapValueEntry) - -Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict( - - PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict( - DESCRIPTOR = _PERSON_PHONENUMBER, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber) - )) - , - DESCRIPTOR = _PERSON, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.Person) - )) -_sym_db.RegisterMessage(Person) -_sym_db.RegisterMessage(Person.PhoneNumber) - -Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict( - - File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict( - DESCRIPTOR = _PROFILE_FILE, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.Profile.File) - )) - , - DESCRIPTOR = _PROFILE, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.Profile) - )) -_sym_db.RegisterMessage(Profile) -_sym_db.RegisterMessage(Profile.File) - -GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict( - DESCRIPTOR = _GETUSERSBYSTATUSINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput) - )) -_sym_db.RegisterMessage(GetUsersByStatusInput) - -GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict( - DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput) - )) -_sym_db.RegisterMessage(GetUsersByStatusOutput) - -AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict( - DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput) - )) -_sym_db.RegisterMessage(AccountHeartbeatOutput) - -LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict( - DESCRIPTOR = _LOGININPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.LoginInput) - )) -_sym_db.RegisterMessage(LoginInput) - -UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict( - DESCRIPTOR = _USEROUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.UserOutput) - )) -_sym_db.RegisterMessage(UserOutput) - -SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict( - DESCRIPTOR = _SINGUPINPUT, - 
__module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.SingupInput) - )) -_sym_db.RegisterMessage(SingupInput) - -SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict( - DESCRIPTOR = _SINGUPOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.SingupOutput) - )) -_sym_db.RegisterMessage(SingupOutput) - -DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict( - DESCRIPTOR = _DELETEUSERINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.DeleteUserInput) - )) -_sym_db.RegisterMessage(DeleteUserInput) - -UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict( - DESCRIPTOR = _UPDATEUSERSTATUSINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput) - )) -_sym_db.RegisterMessage(UpdateUserStatusInput) - -ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict( - DESCRIPTOR = _RESENDCONFIRMCODEINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput) - )) -_sym_db.RegisterMessage(ResendConfirmCodeInput) - -ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict( - DESCRIPTOR = _CONFIRMINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.ConfirmInput) - )) -_sym_db.RegisterMessage(ConfirmInput) - -DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict( - DESCRIPTOR = _DPHEARTBEATOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput) - )) -_sym_db.RegisterMessage(DPHeartbeatOutput) - -DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict( - DESCRIPTOR = _DPGETPHYSICALDISKSINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput) - )) -_sym_db.RegisterMessage(DPGetPhysicalDisksInput) - -DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict( - DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput) - )) -_sym_db.RegisterMessage(DPGetDisksPredictionInput) - -DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict( - DESCRIPTOR = _DPBINARYOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput) - )) -_sym_db.RegisterMessage(DPBinaryOutput) - -CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict( - DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput) - )) -_sym_db.RegisterMessage(CollectionHeartbeatOutput) - -PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict( - DESCRIPTOR = _POSTMETRICSINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.PostMetricsInput) - )) -_sym_db.RegisterMessage(PostMetricsInput) - -PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict( - 
DESCRIPTOR = _POSTDBRELAYINPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput) - )) -_sym_db.RegisterMessage(PostDBRelayInput) - -CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict( - DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT, - __module__ = 'mainServer_pb2' - # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput) - )) -_sym_db.RegisterMessage(CollectionMessageOutput) - - -_TESTOUTPUT_MAPVALUEENTRY.has_options = True -_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) - -_GENERAL = _descriptor.ServiceDescriptor( - name='General', - full_name='proto.General', - file=DESCRIPTOR, - index=0, - options=None, - serialized_start=2081, - serialized_end=2342, - methods=[ - _descriptor.MethodDescriptor( - name='GeneralHeartbeat', - full_name='proto.General.GeneralHeartbeat', - index=0, - containing_service=None, - input_type=_EMPTY, - output_type=_GENERALHEARTBEATOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')), - ), - _descriptor.MethodDescriptor( - name='Ping', - full_name='proto.General.Ping', - index=1, - containing_service=None, - input_type=_EMPTY, - output_type=_PINGOUTOUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')), - ), - _descriptor.MethodDescriptor( - name='Test', - full_name='proto.General.Test', - index=2, - containing_service=None, - input_type=_TESTINPUT, - output_type=_TESTOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')), - ), -]) -_sym_db.RegisterServiceDescriptor(_GENERAL) - -DESCRIPTOR.services_by_name['General'] = _GENERAL - - -_ACCOUNT = _descriptor.ServiceDescriptor( - name='Account', - full_name='proto.Account', - file=DESCRIPTOR, - index=1, - options=None, - serialized_start=2345, - serialized_end=3149, - methods=[ - _descriptor.MethodDescriptor( - name='AccountHeartbeat', - full_name='proto.Account.AccountHeartbeat', - index=0, - containing_service=None, - input_type=_EMPTY, - output_type=_ACCOUNTHEARTBEATOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')), - ), - _descriptor.MethodDescriptor( - name='Login', - full_name='proto.Account.Login', - index=1, - containing_service=None, - input_type=_LOGININPUT, - output_type=_USEROUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')), - ), - _descriptor.MethodDescriptor( - name='Signup', - full_name='proto.Account.Signup', - index=2, - containing_service=None, - input_type=_SINGUPINPUT, - output_type=_SINGUPOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')), - ), - _descriptor.MethodDescriptor( - name='ResendConfirmCode', - full_name='proto.Account.ResendConfirmCode', - index=3, - containing_service=None, - input_type=_RESENDCONFIRMCODEINPUT, - output_type=_GENERALMSGOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')), - ), - _descriptor.MethodDescriptor( - name='Confirm', - full_name='proto.Account.Confirm', - index=4, - 
containing_service=None, - input_type=_CONFIRMINPUT, - output_type=_GENERALMSGOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')), - ), - _descriptor.MethodDescriptor( - name='GetUsersByStatus', - full_name='proto.Account.GetUsersByStatus', - index=5, - containing_service=None, - input_type=_GETUSERSBYSTATUSINPUT, - output_type=_GETUSERSBYSTATUSOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')), - ), - _descriptor.MethodDescriptor( - name='DeleteUser', - full_name='proto.Account.DeleteUser', - index=6, - containing_service=None, - input_type=_DELETEUSERINPUT, - output_type=_GENERALMSGOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')), - ), - _descriptor.MethodDescriptor( - name='UpdateUserStatus', - full_name='proto.Account.UpdateUserStatus', - index=7, - containing_service=None, - input_type=_UPDATEUSERSTATUSINPUT, - output_type=_GENERALMSGOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')), - ), -]) -_sym_db.RegisterServiceDescriptor(_ACCOUNT) - -DESCRIPTOR.services_by_name['Account'] = _ACCOUNT - - -_DISKPROPHET = _descriptor.ServiceDescriptor( - name='Diskprophet', - full_name='proto.Diskprophet', - file=DESCRIPTOR, - index=2, - options=None, - serialized_start=3152, - serialized_end=3487, - methods=[ - _descriptor.MethodDescriptor( - name='DPHeartbeat', - full_name='proto.Diskprophet.DPHeartbeat', - index=0, - containing_service=None, - input_type=_EMPTY, - output_type=_DPHEARTBEATOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')), - ), - _descriptor.MethodDescriptor( - name='DPGetPhysicalDisks', - full_name='proto.Diskprophet.DPGetPhysicalDisks', - index=1, - containing_service=None, - input_type=_DPGETPHYSICALDISKSINPUT, - output_type=_DPBINARYOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')), - ), - _descriptor.MethodDescriptor( - name='DPGetDisksPrediction', - full_name='proto.Diskprophet.DPGetDisksPrediction', - index=2, - containing_service=None, - input_type=_DPGETDISKSPREDICTIONINPUT, - output_type=_DPBINARYOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')), - ), -]) -_sym_db.RegisterServiceDescriptor(_DISKPROPHET) - -DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET - - -_COLLECTION = _descriptor.ServiceDescriptor( - name='Collection', - full_name='proto.Collection', - file=DESCRIPTOR, - index=3, - options=None, - serialized_start=3490, - serialized_end=3837, - methods=[ - _descriptor.MethodDescriptor( - name='CollectionHeartbeat', - full_name='proto.Collection.CollectionHeartbeat', - index=0, - containing_service=None, - input_type=_EMPTY, - output_type=_COLLECTIONHEARTBEATOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')), - ), - _descriptor.MethodDescriptor( - name='PostDBRelay', - full_name='proto.Collection.PostDBRelay', - index=1, - containing_service=None, - input_type=_POSTDBRELAYINPUT, - output_type=_COLLECTIONMESSAGEOUTPUT, - 
options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')), - ), - _descriptor.MethodDescriptor( - name='PostMetrics', - full_name='proto.Collection.PostMetrics', - index=2, - containing_service=None, - input_type=_POSTMETRICSINPUT, - output_type=_COLLECTIONMESSAGEOUTPUT, - options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')), - ), -]) -_sym_db.RegisterServiceDescriptor(_COLLECTION) - -DESCRIPTOR.services_by_name['Collection'] = _COLLECTION - -# @@protoc_insertion_point(module_scope) diff --git a/src/pybind/mgr/diskprediction/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction/common/client_pb2_grpc.py deleted file mode 100644 index c1c32178a6a..00000000000 --- a/src/pybind/mgr/diskprediction/common/client_pb2_grpc.py +++ /dev/null @@ -1,395 +0,0 @@ -# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! -import grpc - -import client_pb2 as mainServer__pb2 - - -class GeneralStub(object): - """-------------------------- General ------------------------------------- - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.GeneralHeartbeat = channel.unary_unary( - '/proto.General/GeneralHeartbeat', - request_serializer=mainServer__pb2.Empty.SerializeToString, - response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString, - ) - self.Ping = channel.unary_unary( - '/proto.General/Ping', - request_serializer=mainServer__pb2.Empty.SerializeToString, - response_deserializer=mainServer__pb2.PingOutout.FromString, - ) - self.Test = channel.unary_unary( - '/proto.General/Test', - request_serializer=mainServer__pb2.TestInput.SerializeToString, - response_deserializer=mainServer__pb2.TestOutput.FromString, - ) - - -class GeneralServicer(object): - """-------------------------- General ------------------------------------- - """ - - def GeneralHeartbeat(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Ping(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Test(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_GeneralServicer_to_server(servicer, server): - rpc_method_handlers = { - 'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler( - servicer.GeneralHeartbeat, - request_deserializer=mainServer__pb2.Empty.FromString, - response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString, - ), - 'Ping': grpc.unary_unary_rpc_method_handler( - servicer.Ping, - request_deserializer=mainServer__pb2.Empty.FromString, - response_serializer=mainServer__pb2.PingOutout.SerializeToString, - ), - 'Test': grpc.unary_unary_rpc_method_handler( - servicer.Test, - request_deserializer=mainServer__pb2.TestInput.FromString, - response_serializer=mainServer__pb2.TestOutput.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 
'proto.General', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class AccountStub(object): - """-------------------------- SERVER ACCOUNT ------------------------------ - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. - """ - self.AccountHeartbeat = channel.unary_unary( - '/proto.Account/AccountHeartbeat', - request_serializer=mainServer__pb2.Empty.SerializeToString, - response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString, - ) - self.Login = channel.unary_unary( - '/proto.Account/Login', - request_serializer=mainServer__pb2.LoginInput.SerializeToString, - response_deserializer=mainServer__pb2.UserOutput.FromString, - ) - self.Signup = channel.unary_unary( - '/proto.Account/Signup', - request_serializer=mainServer__pb2.SingupInput.SerializeToString, - response_deserializer=mainServer__pb2.SingupOutput.FromString, - ) - self.ResendConfirmCode = channel.unary_unary( - '/proto.Account/ResendConfirmCode', - request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString, - response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, - ) - self.Confirm = channel.unary_unary( - '/proto.Account/Confirm', - request_serializer=mainServer__pb2.ConfirmInput.SerializeToString, - response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, - ) - self.GetUsersByStatus = channel.unary_unary( - '/proto.Account/GetUsersByStatus', - request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString, - response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString, - ) - self.DeleteUser = channel.unary_unary( - '/proto.Account/DeleteUser', - request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString, - response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, - ) - self.UpdateUserStatus = channel.unary_unary( - '/proto.Account/UpdateUserStatus', - request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString, - response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, - ) - - -class AccountServicer(object): - """-------------------------- SERVER ACCOUNT ------------------------------ - """ - - def AccountHeartbeat(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Login(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Signup(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def ResendConfirmCode(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def Confirm(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def GetUsersByStatus(self, request, 
context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DeleteUser(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def UpdateUserStatus(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_AccountServicer_to_server(servicer, server): - rpc_method_handlers = { - 'AccountHeartbeat': grpc.unary_unary_rpc_method_handler( - servicer.AccountHeartbeat, - request_deserializer=mainServer__pb2.Empty.FromString, - response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString, - ), - 'Login': grpc.unary_unary_rpc_method_handler( - servicer.Login, - request_deserializer=mainServer__pb2.LoginInput.FromString, - response_serializer=mainServer__pb2.UserOutput.SerializeToString, - ), - 'Signup': grpc.unary_unary_rpc_method_handler( - servicer.Signup, - request_deserializer=mainServer__pb2.SingupInput.FromString, - response_serializer=mainServer__pb2.SingupOutput.SerializeToString, - ), - 'ResendConfirmCode': grpc.unary_unary_rpc_method_handler( - servicer.ResendConfirmCode, - request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString, - response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, - ), - 'Confirm': grpc.unary_unary_rpc_method_handler( - servicer.Confirm, - request_deserializer=mainServer__pb2.ConfirmInput.FromString, - response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, - ), - 'GetUsersByStatus': grpc.unary_unary_rpc_method_handler( - servicer.GetUsersByStatus, - request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString, - response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString, - ), - 'DeleteUser': grpc.unary_unary_rpc_method_handler( - servicer.DeleteUser, - request_deserializer=mainServer__pb2.DeleteUserInput.FromString, - response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, - ), - 'UpdateUserStatus': grpc.unary_unary_rpc_method_handler( - servicer.UpdateUserStatus, - request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString, - response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'proto.Account', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class DiskprophetStub(object): - """------------------------ SERVER DISKPROPHET --------------------------- - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
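        A minimal usage sketch (endpoint, credentials and disk id are
        hypothetical; this mirrors how grpcclient.py drives the stub):

            creds = grpc.ssl_channel_credentials(root_certificates=certs)
            channel = grpc.secure_channel('api.diskprophet.com:31400', creds)
            stub = DiskprophetStub(channel)
            heartbeat = stub.DPHeartbeat(mainServer__pb2.Empty())
            prediction = stub.DPGetDisksPrediction(
                mainServer__pb2.DPGetDisksPredictionInput(
                    physicalDiskIds='<disk-domain-id>'))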
- """ - self.DPHeartbeat = channel.unary_unary( - '/proto.Diskprophet/DPHeartbeat', - request_serializer=mainServer__pb2.Empty.SerializeToString, - response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString, - ) - self.DPGetPhysicalDisks = channel.unary_unary( - '/proto.Diskprophet/DPGetPhysicalDisks', - request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString, - response_deserializer=mainServer__pb2.DPBinaryOutput.FromString, - ) - self.DPGetDisksPrediction = channel.unary_unary( - '/proto.Diskprophet/DPGetDisksPrediction', - request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString, - response_deserializer=mainServer__pb2.DPBinaryOutput.FromString, - ) - - -class DiskprophetServicer(object): - """------------------------ SERVER DISKPROPHET --------------------------- - """ - - def DPHeartbeat(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DPGetPhysicalDisks(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def DPGetDisksPrediction(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_DiskprophetServicer_to_server(servicer, server): - rpc_method_handlers = { - 'DPHeartbeat': grpc.unary_unary_rpc_method_handler( - servicer.DPHeartbeat, - request_deserializer=mainServer__pb2.Empty.FromString, - response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString, - ), - 'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler( - servicer.DPGetPhysicalDisks, - request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString, - response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString, - ), - 'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler( - servicer.DPGetDisksPrediction, - request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString, - response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'proto.Diskprophet', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) - - -class CollectionStub(object): - """------------------------ SERVER Collection --------------------------- - - """ - - def __init__(self, channel): - """Constructor. - - Args: - channel: A grpc.Channel. 
- """ - self.CollectionHeartbeat = channel.unary_unary( - '/proto.Collection/CollectionHeartbeat', - request_serializer=mainServer__pb2.Empty.SerializeToString, - response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString, - ) - self.PostDBRelay = channel.unary_unary( - '/proto.Collection/PostDBRelay', - request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString, - response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString, - ) - self.PostMetrics = channel.unary_unary( - '/proto.Collection/PostMetrics', - request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString, - response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString, - ) - - -class CollectionServicer(object): - """------------------------ SERVER Collection --------------------------- - - """ - - def CollectionHeartbeat(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PostDBRelay(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - def PostMetrics(self, request, context): - # missing associated documentation comment in .proto file - pass - context.set_code(grpc.StatusCode.UNIMPLEMENTED) - context.set_details('Method not implemented!') - raise NotImplementedError('Method not implemented!') - - -def add_CollectionServicer_to_server(servicer, server): - rpc_method_handlers = { - 'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler( - servicer.CollectionHeartbeat, - request_deserializer=mainServer__pb2.Empty.FromString, - response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString, - ), - 'PostDBRelay': grpc.unary_unary_rpc_method_handler( - servicer.PostDBRelay, - request_deserializer=mainServer__pb2.PostDBRelayInput.FromString, - response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString, - ), - 'PostMetrics': grpc.unary_unary_rpc_method_handler( - servicer.PostMetrics, - request_deserializer=mainServer__pb2.PostMetricsInput.FromString, - response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString, - ), - } - generic_handler = grpc.method_handlers_generic_handler( - 'proto.Collection', rpc_method_handlers) - server.add_generic_rpc_handlers((generic_handler,)) diff --git a/src/pybind/mgr/diskprediction/common/clusterdata.py b/src/pybind/mgr/diskprediction/common/clusterdata.py deleted file mode 100644 index 3810f0e9be0..00000000000 --- a/src/pybind/mgr/diskprediction/common/clusterdata.py +++ /dev/null @@ -1,511 +0,0 @@ -""" -Ceph database API - -""" -from __future__ import absolute_import - -import json -import rbd -import rados -from mgr_module import CommandResult - - -RBD_FEATURES_NAME_MAPPING = { - rbd.RBD_FEATURE_LAYERING: 'layering', - rbd.RBD_FEATURE_STRIPINGV2: 'striping', - rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock', - rbd.RBD_FEATURE_OBJECT_MAP: 'object-map', - rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff', - rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten', - rbd.RBD_FEATURE_JOURNALING: 'journaling', - rbd.RBD_FEATURE_DATA_POOL: 'data-pool', - rbd.RBD_FEATURE_OPERATIONS: 'operations', -} - - -def differentiate(data1, data2): - """ - # >>> times = [0, 2] - # >>> values = [100, 101] - # >>> 
differentiate(*zip(times, values)) - 0.5 - """ - return (data2[1] - data1[1]) / float(data2[0] - data1[0]) - - -class ClusterAPI(object): - def __init__(self, module_obj): - self.module = module_obj - - @staticmethod - def format_bitmask(features): - """ - Formats the bitmask: - # >>> format_bitmask(45) - ['deep-flatten', 'exclusive-lock', 'layering', 'object-map'] - """ - names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items() - if key & features == key] - return sorted(names) - - def _open_connection(self, pool_name='device_health_metrics'): - pools = self.module.rados.list_pools() - is_pool = False - for pool in pools: - if pool == pool_name: - is_pool = True - break - if not is_pool: - self.module.log.debug('create %s pool' % pool_name) - # create pool - result = CommandResult('') - self.module.send_command(result, 'mon', '', json.dumps({ - 'prefix': 'osd pool create', - 'format': 'json', - 'pool': pool_name, - 'pg_num': 1, - }), '') - r, outb, outs = result.wait() - assert r == 0 - - # set pool application - result = CommandResult('') - self.module.send_command(result, 'mon', '', json.dumps({ - 'prefix': 'osd pool application enable', - 'format': 'json', - 'pool': pool_name, - 'app': 'mgr_devicehealth', - }), '') - r, outb, outs = result.wait() - assert r == 0 - - ioctx = self.module.rados.open_ioctx(pool_name) - return ioctx - - @classmethod - def _rbd_disk_usage(cls, image, snaps, whole_object=True): - class DUCallback(object): - def __init__(self): - self.used_size = 0 - - def __call__(self, offset, length, exists): - if exists: - self.used_size += length - snap_map = {} - prev_snap = None - total_used_size = 0 - for _, size, name in snaps: - image.set_snap(name) - du_callb = DUCallback() - image.diff_iterate(0, size, prev_snap, du_callb, - whole_object=whole_object) - snap_map[name] = du_callb.used_size - total_used_size += du_callb.used_size - prev_snap = name - return total_used_size, snap_map - - def _rbd_image(self, ioctx, pool_name, image_name): - with rbd.Image(ioctx, image_name) as img: - stat = img.stat() - stat['name'] = image_name - stat['id'] = img.id() - stat['pool_name'] = pool_name - features = img.features() - stat['features'] = features - stat['features_name'] = self.format_bitmask(features) - - # the following keys are deprecated - del stat['parent_pool'] - del stat['parent_name'] - stat['timestamp'] = '{}Z'.format(img.create_timestamp() - .isoformat()) - stat['stripe_count'] = img.stripe_count() - stat['stripe_unit'] = img.stripe_unit() - stat['data_pool'] = None - try: - parent_info = img.parent_info() - stat['parent'] = { - 'pool_name': parent_info[0], - 'image_name': parent_info[1], - 'snap_name': parent_info[2] - } - except rbd.ImageNotFound: - # no parent image - stat['parent'] = None - # snapshots - stat['snapshots'] = [] - for snap in img.list_snaps(): - snap['timestamp'] = '{}Z'.format( - img.get_snap_timestamp(snap['id']).isoformat()) - snap['is_protected'] = img.is_protected_snap(snap['name']) - snap['used_bytes'] = None - snap['children'] = [] - img.set_snap(snap['name']) - for child_pool_name, child_image_name in img.list_children(): - snap['children'].append({ - 'pool_name': child_pool_name, - 'image_name': child_image_name - }) - stat['snapshots'].append(snap) - # disk usage - if 'fast-diff' in stat['features_name']: - snaps = [(s['id'], s['size'], s['name']) - for s in stat['snapshots']] - snaps.sort(key=lambda s: s[0]) - snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)] - total_prov_bytes, snaps_prov_bytes = 
self._rbd_disk_usage(
-                    img, snaps, True)
-                stat['total_disk_usage'] = total_prov_bytes
-                for snap, prov_bytes in snaps_prov_bytes.items():
-                    if snap is None:
-                        stat['disk_usage'] = prov_bytes
-                        continue
-                    for ss in stat['snapshots']:
-                        if ss['name'] == snap:
-                            ss['disk_usage'] = prov_bytes
-                            break
-            else:
-                stat['total_disk_usage'] = None
-                stat['disk_usage'] = None
-            return stat
-
-    def get_rbd_list(self, pool_name=None):
-        if pool_name:
-            pools = [pool_name]
-        else:
-            pools = []
-            for data in self.get_osd_pools():
-                pools.append(data['pool_name'])
-        result = []
-        for pool in pools:
-            rbd_inst = rbd.RBD()
-            with self._open_connection(str(pool)) as ioctx:
-                names = rbd_inst.list(ioctx)
-                for name in names:
-                    try:
-                        stat = self._rbd_image(ioctx, pool, name)
-                    except rbd.ImageNotFound:
-                        continue
-                    result.append(stat)
-        return result
-
-    def get_pg_summary(self):
-        return self.module.get('pg_summary')
-
-    def get_df_stats(self):
-        return self.module.get('df').get('stats', {})
-
-    def get_object_pg_info(self, pool_name, object_name):
-        result = CommandResult('')
-        data_json = {}
-        self.module.send_command(
-            result, 'mon', '', json.dumps({
-                'prefix': 'osd map',
-                'format': 'json',
-                'pool': pool_name,
-                'object': object_name,
-            }), '')
-        ret, outb, outs = result.wait()
-        try:
-            if outb:
-                data_json = json.loads(outb)
-            else:
-                self.module.log.error('unable to get %s pg info' % pool_name)
-        except Exception as e:
-            self.module.log.error(
-                'unable to get %s pg, error: %s' % (pool_name, str(e)))
-        return data_json
-
-    def get_rbd_info(self, pool_name, image_name):
-        with self._open_connection(pool_name) as ioctx:
-            try:
-                stat = self._rbd_image(ioctx, pool_name, image_name)
-                if stat.get('id'):
-                    objects = self.get_pool_objects(pool_name, stat.get('id'))
-                    if objects:
-                        stat['objects'] = objects
-                        stat['pgs'] = list()
-                        for obj_name in objects:
-                            pgs_data = self.get_object_pg_info(pool_name, obj_name)
-                            stat['pgs'].extend([pgs_data])
-            except rbd.ImageNotFound:
-                stat = {}
-        return stat
-
-    def get_pool_objects(self, pool_name, image_id=None):
-        # list objects in the pool; optionally keep only those that
-        # belong to the rbd image with the given id
-        objects = []
-        with self._open_connection(pool_name) as ioctx:
-            object_iterator = ioctx.list_objects()
-            while True:
-                try:
-                    rados_object = object_iterator.next()
-                    if image_id is None:
-                        objects.append(str(rados_object.key))
-                    else:
-                        v = str(rados_object.key).split('.')
-                        if len(v) >= 2 and v[1] == image_id:
-                            objects.append(str(rados_object.key))
-                except StopIteration:
-                    break
-        return objects
-
-    def get_global_total_size(self):
-        total_bytes = \
-            self.module.get('df').get('stats', {}).get('total_bytes')
-        total_size = float(total_bytes) / (1024 * 1024 * 1024)
-        return round(total_size)
-
-    def get_global_avail_size(self):
-        total_avail_bytes = \
-            self.module.get('df').get('stats', {}).get('total_avail_bytes')
-        total_avail_size = float(total_avail_bytes) / (1024 * 1024 * 1024)
-        return round(total_avail_size, 2)
-
-    def get_global_raw_used_size(self):
-        total_used_bytes = \
-            self.module.get('df').get('stats', {}).get('total_used_bytes')
-        total_raw_used_size = float(total_used_bytes) / (1024 * 1024 * 1024)
-        return round(total_raw_used_size, 2)
-
-    def get_global_raw_used_percent(self):
-        total_bytes = \
-            self.module.get('df').get('stats').get('total_bytes')
-        total_used_bytes = \
-            self.module.get('df').get('stats').get('total_used_bytes')
-        if total_bytes and total_used_bytes:
-            total_used_percent = \
-                float(total_used_bytes) / float(total_bytes) * 100
-        else:
-            total_used_percent = 0.0
-        return 
round(total_used_percent, 2) - - def get_osd_data(self): - return self.module.get('config').get('osd_data', '') - - def get_osd_journal(self): - return self.module.get('config').get('osd_journal', '') - - def get_osd_metadata(self, osd_id=None): - if osd_id is not None: - return self.module.get('osd_metadata')[str(osd_id)] - return self.module.get('osd_metadata') - - def get_mgr_metadata(self, mgr_id): - return self.module.get_metadata('mgr', mgr_id) - - def get_osd_epoch(self): - return self.module.get('osd_map').get('epoch', 0) - - def get_osds(self): - return self.module.get('osd_map').get('osds', []) - - def get_max_osd(self): - return self.module.get('osd_map').get('max_osd', '') - - def get_osd_pools(self): - return self.module.get('osd_map').get('pools', []) - - def get_pool_bytes_used(self, pool_id): - bytes_used = None - pools = self.module.get('df').get('pools', []) - for pool in pools: - if pool_id == pool['id']: - bytes_used = pool['stats']['bytes_used'] - return bytes_used - - def get_cluster_id(self): - return self.module.get('mon_map').get('fsid') - - def get_health_status(self): - health = json.loads(self.module.get('health')['json']) - return health.get('status') - - def get_health_checks(self): - health = json.loads(self.module.get('health')['json']) - if health.get('checks'): - message = '' - checks = health['checks'] - for key in checks.keys(): - if message: - message += ";" - if checks[key].get('summary', {}).get('message', ""): - message += checks[key]['summary']['message'] - return message - else: - return '' - - def get_mons(self): - return self.module.get('mon_map').get('mons', []) - - def get_mon_status(self): - mon_status = json.loads(self.module.get('mon_status')['json']) - return mon_status - - def get_osd_smart(self, osd_id, device_id=None): - osd_devices = [] - osd_smart = {} - devices = self.module.get('devices') - for dev in devices.get('devices', []): - osd = "" - daemons = dev.get('daemons', []) - for daemon in daemons: - if daemon[4:] != str(osd_id): - continue - osd = daemon - if not osd: - continue - if dev.get('devid'): - osd_devices.append(dev.get('devid')) - for dev_id in osd_devices: - o_key = '' - if device_id and dev_id != device_id: - continue - smart_data = self.get_device_health(dev_id) - if smart_data: - o_key = sorted(smart_data.iterkeys(), reverse=True)[0] - if o_key and smart_data and smart_data.values(): - dev_smart = smart_data[o_key] - if dev_smart: - osd_smart[dev_id] = dev_smart - return osd_smart - - def get_device_health(self, device_id): - res = {} - try: - with self._open_connection() as ioctx: - with rados.ReadOpCtx() as op: - iter, ret = ioctx.get_omap_vals(op, '', '', 500) - assert ret == 0 - try: - ioctx.operate_read_op(op, device_id) - for key, value in list(iter): - v = None - try: - v = json.loads(value) - except ValueError: - self.module.log.error( - 'unable to parse value for %s: "%s"' % (key, value)) - res[key] = v - except IOError: - pass - except OSError as e: - self.module.log.error( - 'unable to get device {} health, {}'.format(device_id, str(e))) - except IOError: - return {} - return res - - def get_osd_hostname(self, osd_id): - result = '' - osd_metadata = self.get_osd_metadata(osd_id) - if osd_metadata: - osd_host = osd_metadata.get('hostname', 'None') - result = osd_host - return result - - def get_osd_device_id(self, osd_id): - result = {} - if not str(osd_id).isdigit(): - if str(osd_id)[0:4] == 'osd.': - osdid = osd_id[4:] - else: - raise Exception('not a valid id or number') - else: - osdid = osd_id - 
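        # The helpers above share one mon-command pattern; a minimal sketch
        # (pool and object names are hypothetical, mirroring
        # get_object_pg_info):
        #
        #   result = CommandResult('')
        #   self.module.send_command(result, 'mon', '', json.dumps({
        #       'prefix': 'osd map',
        #       'format': 'json',
        #       'pool': 'rbd',
        #       'object': 'rbd_header.1234abc',
        #   }), '')
        #   ret, outb, outs = result.wait()
        #   pg_info = json.loads(outb) if ret == 0 and outb else {}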
osd_metadata = self.get_osd_metadata(osdid) - if osd_metadata: - osd_device_ids = osd_metadata.get('device_ids', '') - if osd_device_ids: - result = {} - for osd_device_id in osd_device_ids.split(','): - dev_name = '' - if len(str(osd_device_id).split('=')) >= 2: - dev_name = osd_device_id.split('=')[0] - dev_id = osd_device_id.split('=')[1] - else: - dev_id = osd_device_id - if dev_name: - result[dev_name] = {'dev_id': dev_id} - return result - - def get_file_systems(self): - return self.module.get('fs_map').get('filesystems', []) - - def get_pg_stats(self): - return self.module.get('pg_dump').get('pg_stats', []) - - def get_all_perf_counters(self): - return self.module.get_all_perf_counters() - - def get(self, data_name): - return self.module.get(data_name) - - def set_device_life_expectancy(self, device_id, from_date, to_date=None): - result = CommandResult('') - - if to_date is None: - self.module.send_command(result, 'mon', '', json.dumps({ - 'prefix': 'device set-life-expectancy', - 'devid': device_id, - 'from': from_date - }), '') - else: - self.module.send_command(result, 'mon', '', json.dumps({ - 'prefix': 'device set-life-expectancy', - 'devid': device_id, - 'from': from_date, - 'to': to_date - }), '') - ret, outb, outs = result.wait() - if ret != 0: - self.module.log.error( - 'failed to set device life expectancy, %s' % outs) - return ret - - def reset_device_life_expectancy(self, device_id): - result = CommandResult('') - self.module.send_command(result, 'mon', '', json.dumps({ - 'prefix': 'device rm-life-expectancy', - 'devid': device_id - }), '') - ret, outb, outs = result.wait() - if ret != 0: - self.module.log.error( - 'failed to reset device life expectancy, %s' % outs) - return ret - - def get_server(self, hostname): - return self.module.get_server(hostname) - - def get_configuration(self, key): - return self.module.get_configuration(key) - - def get_rate(self, svc_type, svc_name, path): - """returns most recent rate""" - data = self.module.get_counter(svc_type, svc_name, path)[path] - - if data and len(data) > 1: - return differentiate(*data[-2:]) - return 0.0 - - def get_latest(self, daemon_type, daemon_name, counter): - return self.module.get_latest(daemon_type, daemon_name, counter) - - def get_all_information(self): - result = dict() - result['osd_map'] = self.module.get('osd_map') - result['osd_map_tree'] = self.module.get('osd_map_tree') - result['osd_map_crush'] = self.module.get('osd_map_crush') - result['config'] = self.module.get('config') - result['mon_map'] = self.module.get('mon_map') - result['fs_map'] = self.module.get('fs_map') - result['osd_metadata'] = self.module.get('osd_metadata') - result['pg_summary'] = self.module.get('pg_summary') - result['pg_dump'] = self.module.get('pg_dump') - result['io_rate'] = self.module.get('io_rate') - result['df'] = self.module.get('df') - result['osd_stats'] = self.module.get('osd_stats') - result['health'] = self.get_health_status() - result['mon_status'] = self.get_mon_status() - return result diff --git a/src/pybind/mgr/diskprediction/common/cypher.py b/src/pybind/mgr/diskprediction/common/cypher.py deleted file mode 100644 index 7b7b60e5059..00000000000 --- a/src/pybind/mgr/diskprediction/common/cypher.py +++ /dev/null @@ -1,71 +0,0 @@ -from __future__ import absolute_import - -import time - - -class NodeInfo(object): - """ Neo4j Node information """ - def __init__(self, label, domain_id, name, meta): - self.label = label - self.domain_id = domain_id - self.name = name - self.meta = meta - - -class 
CypherOP(object): - """ Cypher Operation """ - - @staticmethod - def update(node, key, value, timestamp=int(time.time()*(1000**3))): - result = '' - if isinstance(node, NodeInfo): - if key != 'time': - cy_value = '\'%s\'' % value - else: - cy_value = value - result = \ - 'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % ( - node.label, key, node.label, timestamp, node.label, key, - cy_value) - return result - - @staticmethod - def create_or_merge(node, timestamp=int(time.time()*(1000**3))): - result = '' - if isinstance(node, NodeInfo): - meta_list = [] - if isinstance(node.meta, dict): - for key, value in node.meta.items(): - meta_list.append(CypherOP.update(node, key, value, timestamp)) - domain_id = '{domainId:\'%s\'}' % node.domain_id - if meta_list: - result = 'merge (%s:%s %s) %s %s %s' % ( - node.label, node.label, - domain_id, - CypherOP.update(node, 'name', node.name, timestamp), - ' '.join(meta_list), - CypherOP.update(node, 'time', timestamp, timestamp)) - else: - result = 'merge (%s:%s %s) %s %s' % ( - node.label, node.label, - domain_id, - CypherOP.update(node, 'name', node.name, timestamp), - CypherOP.update(node, 'time', timestamp, timestamp)) - return result - - @staticmethod - def add_link(snode, dnode, relationship, timestamp=None): - result = '' - if timestamp is None: - timestamp = int(time.time()*(1000**3)) - if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo): - cy_snode = CypherOP.create_or_merge(snode, timestamp) - cy_dnode = CypherOP.create_or_merge(dnode, timestamp) - target = snode.label + dnode.label - link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % ( - snode.label, target, relationship, - dnode.label, target, - target, timestamp, - target, timestamp) - result = '%s %s %s' % (cy_snode, cy_dnode, link) - return result diff --git a/src/pybind/mgr/diskprediction/common/grpcclient.py b/src/pybind/mgr/diskprediction/common/grpcclient.py deleted file mode 100644 index e12a8f3d6b6..00000000000 --- a/src/pybind/mgr/diskprediction/common/grpcclient.py +++ /dev/null @@ -1,235 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -import grpc -import json -from logging import getLogger -import os -import time - -from . import DummyResonse -from . import client_pb2 -from . 
import client_pb2_grpc - - -def gen_configuration(**kwargs): - configuration = { - 'host': kwargs.get('host', 'api.diskprophet.com'), - 'user': kwargs.get('user'), - 'password': kwargs.get('password'), - 'port': kwargs.get('port', 31400), - 'mgr_inst': kwargs.get('mgr_inst', None), - 'cert_context': kwargs.get('cert_context'), - 'ssl_target_name': kwargs.get('ssl_target_name', 'api.diskprophet.com'), - 'default_authority': kwargs.get('default_authority', 'api.diskprophet.com')} - return configuration - - -class GRPcClient: - - def __init__(self, configuration): - self.auth = None - self.channel = None - self.host = configuration.get('host') - self.port = configuration.get('port') - if configuration.get('user') and configuration.get('password'): - self.auth = ( - ('account', configuration.get('user')), - ('password', configuration.get('password'))) - self.cert_context = configuration.get('cert_context') - self.ssl_target_name = configuration.get('ssl_target_name') - self.default_authority = configuration.get('default_authority') - self.mgr_inst = configuration.get('mgr_inst') - if self.mgr_inst: - self._logger = self.mgr_inst.log - else: - self._logger = getLogger() - self._get_channel() - - def __nonzero__(self): - if self.channel: - return True - else: - return False - - def _get_channel(self): - try: - creds = grpc.ssl_channel_credentials( - root_certificates=self.cert_context) - self.channel = \ - grpc.secure_channel('{}:{}'.format( - self.host, self.port), creds, - options=(('grpc.ssl_target_name_override', self.ssl_target_name,), - ('grpc.default_authority', self.default_authority),)) - except Exception as e: - self._logger.error( - 'failed to create connection exception: {}'.format( - ';'.join(str(e).split('\n\t')))) - - def test_connection(self): - try: - stub_accout = client_pb2_grpc.AccountStub(self.channel) - result = stub_accout.AccountHeartbeat(client_pb2.Empty()) - if result and "is alive" in str(result.message): - return True - else: - return False - except Exception as e: - self._logger.error( - 'failed to test connection exception: {}'.format( - ';'.join(str(e).split('\n\t')))) - return False - - def _send_metrics(self, data, measurement): - status_info = dict() - status_info['measurement'] = None - status_info['success_count'] = 0 - status_info['failure_count'] = 0 - for dp_data in data: - d_measurement = dp_data.measurement - if not d_measurement: - status_info['measurement'] = measurement - else: - status_info['measurement'] = d_measurement - tag_list = [] - field_list = [] - for name in dp_data.tags: - tag = '{}={}'.format(name, dp_data.tags[name]) - tag_list.append(tag) - for name in dp_data.fields: - if dp_data.fields[name] is None: - continue - if isinstance(dp_data.fields[name], str): - field = '{}=\"{}\"'.format(name, dp_data.fields[name]) - elif isinstance(dp_data.fields[name], bool): - field = '{}={}'.format(name, - str(dp_data.fields[name]).lower()) - elif (isinstance(dp_data.fields[name], int) or - isinstance(dp_data.fields[name], long)): - field = '{}={}i'.format(name, dp_data.fields[name]) - else: - field = '{}={}'.format(name, dp_data.fields[name]) - field_list.append(field) - data = '{},{} {} {}'.format( - status_info['measurement'], - ','.join(tag_list), - ','.join(field_list), - int(time.time() * 1000 * 1000 * 1000)) - try: - resp = self._send_info(data=[data], measurement=status_info['measurement']) - status_code = resp.status_code - if 200 <= status_code < 300: - self._logger.debug( - '{} send diskprediction api success(ret: {})'.format( - 
status_info['measurement'], status_code)) - status_info['success_count'] += 1 - else: - self._logger.error( - 'return code: {}, content: {}'.format( - status_code, resp.content)) - status_info['failure_count'] += 1 - except Exception as e: - status_info['failure_count'] += 1 - self._logger.error(str(e)) - return status_info - - def _send_db_relay(self, data, measurement): - status_info = dict() - status_info['measurement'] = measurement - status_info['success_count'] = 0 - status_info['failure_count'] = 0 - for dp_data in data: - try: - resp = self._send_info( - data=[dp_data.fields['cmd']], measurement=measurement) - status_code = resp.status_code - if 200 <= status_code < 300: - self._logger.debug( - '{} send diskprediction api success(ret: {})'.format( - measurement, status_code)) - status_info['success_count'] += 1 - else: - self._logger.error( - 'return code: {}, content: {}'.format( - status_code, resp.content)) - status_info['failure_count'] += 1 - except Exception as e: - status_info['failure_count'] += 1 - self._logger.error(str(e)) - return status_info - - def send_info(self, data, measurement): - """ - :param data: data structure - :param measurement: data measurement class name - :return: - status_info = { - 'success_count': , - 'failure_count': - } - """ - if measurement == 'db_relay': - return self._send_db_relay(data, measurement) - else: - return self._send_metrics(data, measurement) - - def _send_info(self, data, measurement): - resp = DummyResonse() - try: - stub_collection = client_pb2_grpc.CollectionStub(self.channel) - if measurement == 'db_relay': - result = stub_collection.PostDBRelay( - client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth) - else: - result = stub_collection.PostMetrics( - client_pb2.PostMetricsInput(points=data), metadata=self.auth) - if result and 'success' in str(result.message).lower(): - resp.status_code = 200 - resp.content = '' - else: - resp.status_code = 400 - resp.content = ';'.join(str(result).split('\n\t')) - self._logger.error( - 'failed to send info: {}'.format(resp.content)) - except Exception as e: - resp.status_code = 400 - resp.content = ';'.join(str(e).split('\n\t')) - self._logger.error( - 'failed to send info exception: {}'.format(resp.content)) - return resp - - def query_info(self, host_domain_id, disk_domain_id, measurement): - resp = DummyResonse() - try: - stub_dp = client_pb2_grpc.DiskprophetStub(self.channel) - predicted = stub_dp.DPGetDisksPrediction( - client_pb2.DPGetDisksPredictionInput( - physicalDiskIds=disk_domain_id), - metadata=self.auth) - if predicted and hasattr(predicted, 'data'): - resp.status_code = 200 - resp.content = '' - resp_json = json.loads(predicted.data) - rc = resp_json.get('results', []) - if rc: - series = rc[0].get('series', []) - if series: - values = series[0].get('values', []) - if not values: - resp.resp_json = {} - else: - columns = series[0].get('columns', []) - for item in values: - # get prediction key and value from server. 
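                            # Hedged sketch of the payload this parser expects
                            # (inferred from the loop below; the field names
                            # match what _get_predicted_status in module.py
                            # reads, the values are made up):
                            #
                            #   {"results": [{"series": [{
                            #       "columns": ["near_failure", "predicted"],
                            #       "values": [["Good", 1541000000000000000]]}]}]}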
- for name, value in zip(columns, item): - # process prediction data - resp.resp_json[name] = value - return resp - else: - resp.status_code = 400 - resp.content = '' - resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))} - return resp - except Exception as e: - resp.status_code = 400 - resp.content = ';'.join(str(e).split('\n\t')) - resp.resp_json = {'error': resp.content} - return resp diff --git a/src/pybind/mgr/diskprediction/common/localpredictor.py b/src/pybind/mgr/diskprediction/common/localpredictor.py deleted file mode 100644 index 2a79aee6c77..00000000000 --- a/src/pybind/mgr/diskprediction/common/localpredictor.py +++ /dev/null @@ -1,120 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 -# from __future__ import absolute_import -from __future__ import absolute_import -from logging import getLogger -import time - -from . import DummyResonse -from .clusterdata import ClusterAPI -from ..predictor.disk_failure_predictor import DiskFailurePredictor, get_diskfailurepredictor_path - - -def gen_configuration(**kwargs): - configuration = { - 'mgr_inst': kwargs.get('mgr_inst', None)} - return configuration - - -class LocalPredictor: - - def __init__(self, configuration): - self.mgr_inst = configuration.get('mgr_inst') - if self.mgr_inst: - self._logger = self.mgr_inst.log - else: - self._logger = getLogger() - - def __nonzero__(self): - if self.mgr_inst: - return True - else: - return False - - def test_connection(self): - resp = DummyResonse() - resp.status_code = 200 - resp.content = '' - return resp - - def send_info(self, data, measurement): - status_info = dict() - status_info['measurement'] = measurement - status_info['success_count'] = 0 - status_info['failure_count'] = 0 - for dp_data in data: - try: - resp = self._send_info(data=dp_data, measurement=measurement) - status_code = resp.status_code - if 200 <= status_code < 300: - self._logger.debug( - '%s send diskprediction api success(ret: %s)' - % (measurement, status_code)) - status_info['success_count'] += 1 - else: - self._logger.error( - 'return code: %s, content: %s, data: %s' % ( - status_code, resp.content, data)) - status_info['failure_count'] += 1 - except Exception as e: - status_info['failure_count'] += 1 - self._logger.error(str(e)) - return status_info - - def _send_info(self, data, measurement): - resp = DummyResonse() - resp.status_code = 200 - resp.content = '' - return resp - - def _local_predict(self, smart_datas): - obj_predictor = DiskFailurePredictor() - predictor_path = get_diskfailurepredictor_path() - models_path = "{}/models".format(predictor_path) - obj_predictor.initialize(models_path) - return obj_predictor.predict(smart_datas) - - def query_info(self, host_domain_id, disk_domain_id, measurement): - predict_datas = list() - obj_api = ClusterAPI(self.mgr_inst) - predicted_result = 'Unknown' - smart_datas = obj_api.get_device_health(disk_domain_id) - if len(smart_datas) >= 6: - o_keys = sorted(smart_datas.iterkeys(), reverse=True) - for o_key in o_keys: - dev_smart = {} - s_val = smart_datas[o_key] - ata_smart = s_val.get('ata_smart_attributes', {}) - for attr in ata_smart.get('table', []): - if attr.get('raw', {}).get('string'): - if str(attr.get('raw', {}).get('string', '0')).isdigit(): - dev_smart['smart_%s_raw' % attr.get('id')] = \ - int(attr.get('raw', {}).get('string', '0')) - else: - if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit(): - dev_smart['smart_%s_raw' % attr.get('id')] = \ - int(attr.get('raw', {}).get('string', - '0').split(' ')[0]) - else: - 
dev_smart['smart_%s_raw' % attr.get('id')] = \ - attr.get('raw', {}).get('value', 0) - if s_val.get('power_on_time', {}).get('hours') is not None: - dev_smart['smart_9_raw'] = int(s_val['power_on_time']['hours']) - if dev_smart: - predict_datas.append(dev_smart) - - if predict_datas: - predicted_result = self._local_predict(predict_datas) - resp = DummyResonse() - resp.status_code = 200 - resp.resp_json = { - "disk_domain_id": disk_domain_id, - "near_failure": predicted_result, - "predicted": int(time.time() * (1000 ** 3))} - return resp - else: - resp = DummyResonse() - resp.status_code = 400 - resp.content = '\'predict\' need least 6 pieces disk smart data' - resp.resp_json = \ - {'error': '\'predict\' need least 6 pieces disk smart data'} - return resp diff --git a/src/pybind/mgr/diskprediction/module.py b/src/pybind/mgr/diskprediction/module.py deleted file mode 100644 index 1edac76531b..00000000000 --- a/src/pybind/mgr/diskprediction/module.py +++ /dev/null @@ -1,376 +0,0 @@ -""" -A diskprediction module -""" -from __future__ import absolute_import - -from datetime import datetime -import errno -import json -from mgr_module import MgrModule -import os -from threading import Event - -from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED -from .task import MetricsRunner, PredictionRunner, SmartRunner - - -DP_AGENTS = [MetricsRunner, SmartRunner, PredictionRunner] - - -class Module(MgrModule): - - OPTIONS = [ - { - 'name': 'diskprediction_server', - 'default': '' - }, - { - 'name': 'diskprediction_port', - 'default': '31400' - }, - { - 'name': 'diskprediction_user', - 'default': '' - }, - { - 'name': 'diskprediction_password', - 'default': '' - }, - { - 'name': 'diskprediction_upload_metrics_interval', - 'default': '600' - }, - { - 'name': 'diskprediction_upload_smart_interval', - 'default': '43200' - }, - { - 'name': 'diskprediction_retrieve_prediction_interval', - 'default': '43200' - }, - { - 'name': 'diskprediction_cert_context', - 'default': '' - }, - { - 'name': 'diskprediction_ssl_target_name_override', - 'default': 'api.diskprophet.com' - }, - { - 'name': 'diskprediction_default_authority', - 'default': 'api.diskprophet.com' - } - ] - - COMMANDS = [ - { - 'cmd': 'device show-prediction-config', - 'desc': 'Prints diskprediction configuration', - 'perm': 'r' - }, - { - 'cmd': 'device set-cloud-prediction-config ' - 'name=server,type=CephString,req=true ' - 'name=user,type=CephString,req=true ' - 'name=password,type=CephString,req=true ' - 'name=certfile,type=CephString,req=true ' - 'name=port,type=CephString,req=false ', - 'desc': 'Configure Disk Prediction service', - 'perm': 'rw' - }, - { - 'cmd': 'device get-predicted-status ' - 'name=dev_id,type=CephString,req=true', - 'desc': 'Get physical device predicted result', - 'perm': 'r' - }, - { - 'cmd': 'device debug metrics-forced', - 'desc': 'Run metrics agent forced', - 'perm': 'r' - }, - { - 'cmd': 'device debug prediction-forced', - 'desc': 'Run prediction agent forced', - 'perm': 'r' - }, - { - 'cmd': 'device debug smart-forced', - 'desc': 'Run smart agent forced', - 'perm': 'r' - }, - { - 'cmd': 'device predict-life-expectancy ' - 'name=dev_id,type=CephString,req=true', - 'desc': 'Predict life expectancy with local predictor', - 'perm': 'r' - }, - { - 'cmd': 'diskprediction self-test', - 'desc': 'Prints hello world to mgr.x.log', - 'perm': 'r' - }, - { - 'cmd': 'diskprediction status', - 'desc': 'Check diskprediction status', - 'perm': 'r' - } - ] - - def __init__(self, *args, **kwargs): - super(Module, 
self).__init__(*args, **kwargs) - self.status = {'status': DP_MGR_STAT_DISABLED} - self.shutdown_event = Event() - self._agents = [] - self._activated_cloud = False - self._activated_local = False - self._prediction_result = {} - self.config = dict() - - @property - def config_keys(self): - return dict((o['name'], o.get('default', None)) for o in self.OPTIONS) - - def set_config_option(self, option, value): - if option not in self.config_keys.keys(): - raise RuntimeError('{0} is a unknown configuration ' - 'option'.format(option)) - - if option in ['diskprediction_port', - 'diskprediction_upload_metrics_interval', - 'diskprediction_upload_smart_interval', - 'diskprediction_retrieve_prediction_interval']: - if not str(value).isdigit(): - raise RuntimeError('invalid {} configured. Please specify ' - 'a valid integer {}'.format(option, value)) - - self.log.debug('Setting in-memory config option %s to: %s', option, - value) - self.set_config(option, value) - self.config[option] = value - - return True - - def get_configuration(self, key): - return self.get_config(key, self.config_keys[key]) - - def _show_prediction_config(self, inbuf, cmd): - self.show_module_config() - return 0, json.dumps(self.config, indent=4), '' - - def _self_test(self, inbuf, cmd): - from .test.test_agents import test_agents - test_agents(self) - return 0, 'self-test completed', '' - - def _set_ssl_target_name(self, inbuf, cmd): - str_ssl_target = cmd.get('ssl_target_name', '') - try: - self.set_config('diskprediction_ssl_target_name_override', str_ssl_target) - return (0, - 'success to config ssl target name', 0) - except Exception as e: - return -errno.EINVAL, '', str(e) - - def _set_ssl_default_authority(self, inbuf, cmd): - str_ssl_authority = cmd.get('ssl_authority', '') - try: - self.set_config('diskprediction_default_authority', str_ssl_authority) - return (0, - 'success to config ssl default authority', 0) - except Exception as e: - return -errno.EINVAL, '', str(e) - - def _get_predicted_status(self, inbuf, cmd): - physical_data = dict() - try: - if not self._prediction_result: - for _agent in self._agents: - if isinstance(_agent, PredictionRunner): - _agent.event.set() - break - pre_data = self._prediction_result.get(cmd['dev_id']) - if pre_data: - p_data = pre_data.get('prediction', {}) - if not p_data.get('predicted'): - predicted = '' - else: - predicted = datetime.fromtimestamp(int( - p_data.get('predicted')) / (1000 ** 3)) - d_data = { - 'near_failure': p_data.get('near_failure'), - 'predicted': str(predicted), - 'serial_number': pre_data.get('serial_number'), - 'disk_wwn': pre_data.get('disk_wwn'), - 'attachment': p_data.get('disk_name', '') - } - physical_data[cmd['dev_id']] = d_data - msg = json.dumps(d_data, indent=4) - else: - msg = 'device %s predicted data not ready' % cmd['dev_id'] - except Exception as e: - if str(e).find('No such file') >= 0: - msg = 'unable to get device {} predicted data'.format( - cmd['dev_id']) - else: - msg = 'unable to get osd {} predicted data, {}'.format( - cmd['dev_id'], str(e)) - self.log.error(msg) - return -errno.EINVAL, '', msg - return 0, msg, '' - - def _set_cloud_prediction_config(self, inbuf, cmd): - trusted_certs = '' - str_cert_path = cmd.get('certfile', '') - if os.path.exists(str_cert_path): - with open(str_cert_path, 'rb') as f: - trusted_certs = f.read() - self.set_config_option( - 'diskprediction_cert_context', trusted_certs) - for _agent in self._agents: - _agent.event.set() - self.set_config('diskprediction_server', cmd['server']) - 
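        # Hedged CLI sketch matching the COMMANDS entry above; these settings
        # are normally supplied in one call (the certfile path is
        # hypothetical):
        #
        #   ceph device set-cloud-prediction-config <server> <user> <password> \
        #       /etc/ceph/dp-server.pem [<port>]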
self.set_config('diskprediction_user', cmd['user']) - self.set_config('diskprediction_password', cmd['password']) - if cmd.get('port'): - self.set_config('diskprediction_port', cmd['port']) - return 0, 'succeed to config cloud mode connection', '' - else: - return -errno.EINVAL, '', 'certification file not existed' - - def _debug_prediction_forced(self, inbuf, cmd): - msg = '' - for _agent in self._agents: - if isinstance(_agent, PredictionRunner): - msg = 'run prediction agent successfully' - _agent.event.set() - return 0, msg, '' - - def _debug_metrics_forced(self, inbuf, cmd): - msg = '' - for _agent in self._agents: - if isinstance(_agent, MetricsRunner): - msg = 'run metrics agent successfully' - _agent.event.set() - return 0, msg, '' - - def _debug_smart_forced(self, inbuf, cmd): - msg = ' ' - for _agent in self._agents: - if isinstance(_agent, SmartRunner): - msg = 'run smart agent successfully' - _agent.event.set() - return 0, msg, '' - - def _status(self, inbuf, cmd): - return 0, json.dumps(self.status), '' - - def _predict_life_expectancy(self, inbuf, cmd): - assert cmd['dev_id'] - from .common.localpredictor import LocalPredictor, gen_configuration - conf = gen_configuration(mgr_inst=self) - obj_predictor = LocalPredictor(conf) - result = obj_predictor.query_info('', cmd['dev_id'], '') - if result.status_code == 200: - near_failure = result.json()['near_failure'] - if near_failure.lower() == 'good': - return 0, '>6w', '' - elif near_failure.lower() == 'warning': - return 0, '>=2w and <=6w', '' - elif near_failure.lower() == 'bad': - return 0, '<2w', '' - else: - return 0, 'unknown', '' - else: - return -errno.ENAVAIL, '', result.content - - def handle_command(self, inbuf, cmd): - for o_cmd in self.COMMANDS: - if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]: - fun_name = '' - avgs = o_cmd['cmd'].split(' ') - for avg in avgs: - if avg.lower() == 'diskprediction': - continue - if avg.lower() == 'device': - continue - if '=' in avg or ',' in avg or not avg: - continue - fun_name += '_%s' % avg.replace('-', '_') - if fun_name: - fun = getattr( - self, fun_name) - if fun: - return fun(inbuf, cmd) - return -errno.EINVAL, '', 'cmd not found' - - def show_module_config(self): - self.fsid = self.get('mon_map')['fsid'] - self.log.debug('Found Ceph fsid %s', self.fsid) - - for key, default in self.config_keys.items(): - self.set_config_option(key, self.get_config(key, default)) - - def serve(self): - self.log.info('Starting diskprediction module') - self.status = {'status': DP_MGR_STAT_ENABLED} - - while True: - mode = self.get_option('device_failure_prediction_mode') - if mode == 'cloud': - if not self._activated_cloud: - self.start_cloud_disk_prediction() - else: - if self._activated_cloud: - self.stop_disk_prediction() - if mode == 'local': - if not self._activated_local: - self.start_local_disk_prediction() - else: - if self._activated_local: - self.stop_disk_prediction() - - self.shutdown_event.wait(5) - if self.shutdown_event.is_set(): - break - self.stop_disk_prediction() - - def start_cloud_disk_prediction(self): - assert not self._activated_cloud - for dp_agent in DP_AGENTS: - obj_agent = dp_agent(self) - if obj_agent: - obj_agent.start() - else: - raise Exception('failed to start task %s' % obj_agent.task_name) - self._agents.append(obj_agent) - self._activated_cloud = True - self.log.info('start cloud disk prediction') - - def start_local_disk_prediction(self): - assert not self._activated_local - for dp_agent in [PredictionRunner]: - obj_agent = dp_agent(self) - if 
obj_agent: - obj_agent.start() - else: - raise Exception('failed to start task %s' % obj_agent.task_name) - self._agents.append(obj_agent) - self._activated_local = True - self.log.info('start local model disk prediction') - - def stop_disk_prediction(self): - assert self._activated_local or self._activated_cloud - self.status = {'status': DP_MGR_STAT_DISABLED} - while self._agents: - dp_agent = self._agents.pop() - dp_agent.terminate() - dp_agent.join(5) - del dp_agent - self._activated_local = False - self._activated_cloud = False - self.log.info('stop disk prediction') - - def shutdown(self): - self.shutdown_event.set() - super(Module, self).shutdown() diff --git a/src/pybind/mgr/diskprediction/predictor/__init__.py b/src/pybind/mgr/diskprediction/predictor/__init__.py deleted file mode 100644 index d056be7308b..00000000000 --- a/src/pybind/mgr/diskprediction/predictor/__init__.py +++ /dev/null @@ -1 +0,0 @@ -'''Initial file for predictors''' diff --git a/src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py b/src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py deleted file mode 100644 index bf9b0d7db16..00000000000 --- a/src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py +++ /dev/null @@ -1,265 +0,0 @@ -"""Sample code for disk failure prediction. - -This sample code is a community version for anyone who is interested in Machine -Learning and care about disk failure. - -This class provides a disk failure prediction module. Given models dirpath to -initialize a predictor instance and then use 6 days data to predict. Predict -function will return a string to indicate disk failure status: "Good", -"Warning", "Bad", or "Unknown". - -An example code is as follows: - ->>> model = disk_failure_predictor.DiskFailurePredictor() ->>> status = model.initialize("./models") ->>> if status: ->>> model.predict(disk_days) -'Bad' - - -Provided by ProphetStor Data Services Inc. -http://www.prophetstor.com/ - -""" - -from __future__ import print_function -import os -import json -import pickle - - -def get_diskfailurepredictor_path(): - path = os.path.abspath(__file__) - dir_path = os.path.dirname(path) - return dir_path - - -class DiskFailurePredictor(object): - """Disk failure prediction - - This class implements a disk failure prediction module. - """ - - CONFIG_FILE = "config.json" - EXCLUDED_ATTRS = ['smart_9_raw', 'smart_241_raw', 'smart_242_raw'] - - def __init__(self): - """ - This function may throw exception due to wrong file operation. - """ - - self.model_dirpath = "" - self.model_context = {} - - def initialize(self, model_dirpath): - """ - Initialize all models. - - Args: None - - Returns: - Error message. If all goes well, return an empty string. - - Raises: - """ - - config_path = os.path.join(model_dirpath, self.CONFIG_FILE) - if not os.path.isfile(config_path): - return "Missing config file: " + config_path - else: - with open(config_path) as f_conf: - self.model_context = json.load(f_conf) - - for model_name in self.model_context: - model_path = os.path.join(model_dirpath, model_name) - - if not os.path.isfile(model_path): - return "Missing model file: " + model_path - - self.model_dirpath = model_dirpath - - def __preprocess(self, disk_days): - """ - Preprocess disk attributes. - - Args: - disk_days: Refer to function predict(...). - - Returns: - new_disk_days: Processed disk days. 
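A runnable expansion of the doctest-style example in the docstring above, assuming the predictor keeps this layout after the split (the import path here is illustrative, not taken from this patch) and that six consecutive days of samples are available; the SMART values are synthetic:

    import os

    from disk_failure_predictor import (DiskFailurePredictor,
                                        get_diskfailurepredictor_path)

    model = DiskFailurePredictor()
    # initialize() returns an error string on failure, nothing on success.
    err = model.initialize(os.path.join(get_diskfailurepredictor_path(), 'models'))
    if err:
        raise RuntimeError(err)

    # Six consecutive days of (made-up) readings; only smart_<id>_raw keys are
    # used, and smart_9/241/242_raw are dropped by the preprocessing step.
    disk_days = [{'smart_5_raw': 0, 'smart_187_raw': d, 'smart_197_raw': 2 * d}
                 for d in range(6)]
    print(model.predict(disk_days))  # 'Good', 'Warning', 'Bad' or 'Unknown'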
- """ - - req_attrs = [] - new_disk_days = [] - - attr_list = set.intersection(*[set(disk_day.keys()) - for disk_day in disk_days]) - for attr in attr_list: - if (attr.startswith('smart_') and attr.endswith('_raw')) and \ - attr not in self.EXCLUDED_ATTRS: - req_attrs.append(attr) - - for disk_day in disk_days: - new_disk_day = {} - for attr in req_attrs: - if float(disk_day[attr]) >= 0.0: - new_disk_day[attr] = disk_day[attr] - - new_disk_days.append(new_disk_day) - - return new_disk_days - - @staticmethod - def __get_diff_attrs(disk_days): - """ - Get 5 days differential attributes. - - Args: - disk_days: Refer to function predict(...). - - Returns: - attr_list: All S.M.A.R.T. attributes used in given disk. Here we - use intersection set of all disk days. - - diff_disk_days: A list struct comprises 5 dictionaries, each - dictionary contains differential attributes. - - Raises: - Exceptions of wrong list/dict operations. - """ - - all_attrs = [set(disk_day.keys()) for disk_day in disk_days] - attr_list = list(set.intersection(*all_attrs)) - attr_list = disk_days[0].keys() - prev_days = disk_days[:-1] - curr_days = disk_days[1:] - diff_disk_days = [] - - for prev, cur in zip(prev_days, curr_days): - diff_disk_days.append({attr:(int(cur[attr]) - int(prev[attr])) - for attr in attr_list}) - - return attr_list, diff_disk_days - - def __get_best_models(self, attr_list): - """ - Find the best model from model list according to given attribute list. - - Args: - attr_list: All S.M.A.R.T. attributes used in given disk. - - Returns: - modelpath: The best model for the given attribute list. - model_attrlist: 'Ordered' attribute list of the returned model. - Must be aware that SMART attributes is in order. - - Raises: - """ - - models = self.model_context.keys() - - scores = [] - for model_name in models: - scores.append(sum(attr in attr_list - for attr in self.model_context[model_name])) - max_score = max(scores) - - # Skip if too few matched attributes. - if max_score < 3: - print("Too few matched attributes") - return None - - best_models = {} - best_model_indices = [idx for idx, score in enumerate(scores) - if score > max_score - 2] - for model_idx in best_model_indices: - model_name = list(models)[model_idx] - model_path = os.path.join(self.model_dirpath, model_name) - model_attrlist = self.model_context[model_name] - best_models[model_path] = model_attrlist - - return best_models - # return os.path.join(self.model_dirpath, model_name), model_attrlist - - @staticmethod - def __get_ordered_attrs(disk_days, model_attrlist): - """ - Return ordered attributes of given disk days. - - Args: - disk_days: Unordered disk days. - model_attrlist: Model's ordered attribute list. - - Returns: - ordered_attrs: Ordered disk days. - - Raises: None - """ - - ordered_attrs = [] - - for one_day in disk_days: - one_day_attrs = [] - - for attr in model_attrlist: - if attr in one_day: - one_day_attrs.append(one_day[attr]) - else: - one_day_attrs.append(0) - - ordered_attrs.append(one_day_attrs) - - return ordered_attrs - - def predict(self, disk_days): - """ - Predict using given 6-days disk S.M.A.R.T. attributes. - - Args: - disk_days: A list struct comprises 6 dictionaries. These - dictionaries store 'consecutive' days of disk SMART - attributes. - Returns: - A string indicates prediction result. 
One of following four strings - will be returned according to disk failure status: - (1) Good : Disk is health - (2) Warning : Disk has some symptoms but may not fail immediately - (3) Bad : Disk is in danger and data backup is highly recommended - (4) Unknown : Not enough data for prediction. - - Raises: - Pickle exceptions - """ - - all_pred = [] - - proc_disk_days = self.__preprocess(disk_days) - attr_list, diff_data = DiskFailurePredictor.__get_diff_attrs(proc_disk_days) - modellist = self.__get_best_models(attr_list) - if modellist is None: - return "Unknown" - - for modelpath in modellist: - model_attrlist = modellist[modelpath] - ordered_data = DiskFailurePredictor.__get_ordered_attrs( - diff_data, model_attrlist) - - try: - with open(modelpath, 'rb') as f_model: - clf = pickle.load(f_model) - - except UnicodeDecodeError: - # Compatibility for python3 - with open(modelpath, 'rb') as f_model: - clf = pickle.load(f_model, encoding='latin1') - - pred = clf.predict(ordered_data) - - all_pred.append(1 if any(pred) else 0) - - score = 2 ** sum(all_pred) - len(modellist) - if score > 10: - return "Bad" - if score > 4: - return "Warning" - return "Good" diff --git a/src/pybind/mgr/diskprediction/predictor/models/config.json b/src/pybind/mgr/diskprediction/predictor/models/config.json deleted file mode 100644 index 9a1485ca35d..00000000000 --- a/src/pybind/mgr/diskprediction/predictor/models/config.json +++ /dev/null @@ -1,77 +0,0 @@ -{ -"svm_123.pkl": ["smart_197_raw", "smart_183_raw", "smart_200_raw", "smart_194_raw", "smart_254_raw", "smart_252_raw", "smart_4_raw", "smart_222_raw", "smart_187_raw", "smart_184_raw"], -"svm_105.pkl": ["smart_197_raw", "smart_4_raw", "smart_5_raw", "smart_252_raw", "smart_184_raw", "smart_223_raw", "smart_198_raw", "smart_10_raw", "smart_189_raw", "smart_222_raw"], -"svm_82.pkl":["smart_184_raw", "smart_2_raw", "smart_187_raw", "smart_225_raw", "smart_198_raw", "smart_197_raw", "smart_4_raw", "smart_13_raw", "smart_188_raw", "smart_251_raw"], -"svm_186.pkl":["smart_3_raw", "smart_11_raw", "smart_198_raw", "smart_250_raw", "smart_13_raw", "smart_200_raw", "smart_224_raw", "smart_187_raw", "smart_22_raw", "smart_4_raw", "smart_220_raw"], -"svm_14.pkl":["smart_12_raw", "smart_226_raw", "smart_187_raw", "smart_196_raw", "smart_5_raw", "smart_183_raw", "smart_255_raw", "smart_250_raw", "smart_201_raw", "smart_8_raw"], -"svm_10.pkl":["smart_251_raw", "smart_4_raw", "smart_223_raw", "smart_13_raw", "smart_255_raw", "smart_188_raw", "smart_197_raw", "smart_201_raw", "smart_250_raw", "smart_15_raw"], -"svm_235.pkl":["smart_15_raw", "smart_255_raw", "smart_252_raw", "smart_197_raw", "smart_250_raw", "smart_254_raw", "smart_13_raw", "smart_251_raw", "smart_198_raw", "smart_189_raw", "smart_191_raw"], -"svm_234.pkl":["smart_187_raw", "smart_183_raw", "smart_3_raw", "smart_4_raw", "smart_222_raw", "smart_184_raw", "smart_5_raw", "smart_198_raw", "smart_200_raw", "smart_8_raw", "smart_10_raw"], -"svm_119.pkl":["smart_254_raw", "smart_8_raw", "smart_183_raw", "smart_184_raw", "smart_195_raw", "smart_252_raw", "smart_191_raw", "smart_10_raw", "smart_200_raw", "smart_197_raw"], -"svm_227.pkl":["smart_254_raw", "smart_189_raw", "smart_225_raw", "smart_224_raw", "smart_197_raw", "smart_223_raw", "smart_4_raw", "smart_183_raw", "smart_11_raw", "smart_184_raw", "smart_13_raw"], -"svm_18.pkl":["smart_197_raw", "smart_3_raw", "smart_220_raw", "smart_193_raw", "smart_10_raw", "smart_187_raw", "smart_188_raw", "smart_225_raw", "smart_194_raw", "smart_13_raw"], 
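The `predict()` method removed above collects one binary failure vote per matched model and grades the exponentially weighted tally: `score = 2 ** sum(votes) - len(models)`, with `score > 10` mapping to "Bad" and `score > 4` to "Warning". A worked example of just that rule, with no models needed:

    def grade(votes):
        """Reproduce the voting rule from the removed predict() method."""
        score = 2 ** sum(votes) - len(votes)
        if score > 10:
            return 'Bad'
        if score > 4:
            return 'Warning'
        return 'Good'

    print(grade([1, 1, 1, 1, 0]))  # 2**4 - 5 = 11 -> 'Bad'
    print(grade([1, 1, 1]))        # 2**3 - 3 = 5  -> 'Warning'
    print(grade([1, 1, 0, 0, 0]))  # 2**2 - 5 = -1 -> 'Good'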
-"svm_78.pkl":["smart_10_raw", "smart_183_raw", "smart_191_raw", "smart_13_raw", "smart_198_raw", "smart_22_raw", "smart_195_raw", "smart_12_raw", "smart_224_raw", "smart_200_raw"], -"svm_239.pkl":["smart_3_raw", "smart_254_raw", "smart_199_raw", "smart_225_raw", "smart_187_raw", "smart_195_raw", "smart_197_raw", "smart_2_raw", "smart_193_raw", "smart_220_raw", "smart_183_raw"], -"svm_174.pkl":["smart_183_raw", "smart_196_raw", "smart_225_raw", "smart_189_raw", "smart_4_raw", "smart_3_raw", "smart_9_raw", "smart_198_raw", "smart_15_raw", "smart_5_raw", "smart_194_raw"], -"svm_104.pkl":["smart_12_raw", "smart_198_raw", "smart_197_raw", "smart_4_raw", "smart_240_raw", "smart_187_raw", "smart_225_raw", "smart_8_raw", "smart_3_raw", "smart_2_raw"], -"svm_12.pkl":["smart_222_raw", "smart_251_raw", "smart_194_raw", "smart_9_raw", "smart_184_raw", "smart_191_raw", "smart_187_raw", "smart_255_raw", "smart_4_raw", "smart_11_raw"], -"svm_97.pkl":["smart_15_raw", "smart_197_raw", "smart_190_raw", "smart_199_raw", "smart_200_raw", "smart_12_raw", "smart_191_raw", "smart_254_raw", "smart_194_raw", "smart_201_raw"], -"svm_118.pkl":["smart_11_raw", "smart_225_raw", "smart_196_raw", "smart_197_raw", "smart_198_raw", "smart_200_raw", "smart_3_raw", "smart_10_raw", "smart_191_raw", "smart_22_raw"], -"svm_185.pkl":["smart_191_raw", "smart_254_raw", "smart_3_raw", "smart_190_raw", "smart_15_raw", "smart_22_raw", "smart_2_raw", "smart_198_raw", "smart_13_raw", "smart_226_raw", "smart_225_raw"], -"svm_206.pkl":["smart_183_raw", "smart_192_raw", "smart_197_raw", "smart_255_raw", "smart_187_raw", "smart_254_raw", "smart_198_raw", "smart_13_raw", "smart_226_raw", "smart_240_raw", "smart_8_raw"], -"svm_225.pkl":["smart_224_raw", "smart_11_raw", "smart_5_raw", "smart_4_raw", "smart_225_raw", "smart_197_raw", "smart_15_raw", "smart_183_raw", "smart_193_raw", "smart_190_raw", "smart_187_raw"], -"svm_169.pkl":["smart_252_raw", "smart_183_raw", "smart_254_raw", "smart_11_raw", "smart_193_raw", "smart_22_raw", "smart_226_raw", "smart_189_raw", "smart_225_raw", "smart_198_raw", "smart_200_raw"], -"svm_79.pkl":["smart_184_raw", "smart_196_raw", "smart_4_raw", "smart_226_raw", "smart_199_raw", "smart_187_raw", "smart_193_raw", "smart_188_raw", "smart_12_raw", "smart_250_raw"], -"svm_69.pkl":["smart_187_raw", "smart_9_raw", "smart_200_raw", "smart_11_raw", "smart_252_raw", "smart_189_raw", "smart_4_raw", "smart_188_raw", "smart_255_raw", "smart_201_raw"], -"svm_201.pkl":["smart_224_raw", "smart_8_raw", "smart_250_raw", "smart_2_raw", "smart_198_raw", "smart_15_raw", "smart_193_raw", "smart_223_raw", "smart_3_raw", "smart_11_raw", "smart_191_raw"], -"svm_114.pkl":["smart_226_raw", "smart_188_raw", "smart_2_raw", "smart_11_raw", "smart_4_raw", "smart_193_raw", "smart_184_raw", "smart_194_raw", "smart_198_raw", "smart_13_raw"], -"svm_219.pkl":["smart_12_raw", "smart_22_raw", "smart_8_raw", "smart_191_raw", "smart_197_raw", "smart_254_raw", "smart_15_raw", "smart_193_raw", "smart_199_raw", "smart_225_raw", "smart_192_raw"], -"svm_168.pkl":["smart_255_raw", "smart_191_raw", "smart_193_raw", "smart_220_raw", "smart_5_raw", "smart_3_raw", "smart_222_raw", "smart_223_raw", "smart_197_raw", "smart_196_raw", "smart_22_raw"], -"svm_243.pkl":["smart_11_raw", "smart_255_raw", "smart_10_raw", "smart_189_raw", "smart_225_raw", "smart_240_raw", "smart_222_raw", "smart_197_raw", "smart_183_raw", "smart_198_raw", "smart_12_raw"], -"svm_195.pkl":["smart_183_raw", "smart_5_raw", "smart_11_raw", "smart_197_raw", "smart_15_raw", "smart_9_raw", 
"smart_4_raw", "smart_220_raw", "smart_12_raw", "smart_192_raw", "smart_240_raw"], -"svm_222.pkl":["smart_10_raw", "smart_13_raw", "smart_188_raw", "smart_15_raw", "smart_192_raw", "smart_224_raw", "smart_225_raw", "smart_187_raw", "smart_222_raw", "smart_220_raw", "smart_252_raw"], -"svm_62.pkl":["smart_196_raw", "smart_251_raw", "smart_187_raw", "smart_224_raw", "smart_11_raw", "smart_12_raw", "smart_8_raw", "smart_199_raw", "smart_220_raw", "smart_195_raw"], -"svm_151.pkl":["smart_187_raw", "smart_223_raw", "smart_200_raw", "smart_189_raw", "smart_251_raw", "smart_255_raw", "smart_222_raw", "smart_192_raw", "smart_12_raw", "smart_183_raw", "smart_22_raw"], -"svm_125.pkl":["smart_9_raw", "smart_252_raw", "smart_197_raw", "smart_251_raw", "smart_11_raw", "smart_12_raw", "smart_188_raw", "smart_240_raw", "smart_10_raw", "smart_223_raw"], -"svm_124.pkl":["smart_193_raw", "smart_187_raw", "smart_183_raw", "smart_11_raw", "smart_10_raw", "smart_8_raw", "smart_194_raw", "smart_189_raw", "smart_222_raw", "smart_191_raw"], -"svm_67.pkl":["smart_2_raw", "smart_8_raw", "smart_225_raw", "smart_240_raw", "smart_13_raw", "smart_5_raw", "smart_187_raw", "smart_198_raw", "smart_199_raw", "smart_3_raw"], -"svm_115.pkl":["smart_222_raw", "smart_193_raw", "smart_223_raw", "smart_195_raw", "smart_252_raw", "smart_189_raw", "smart_199_raw", "smart_187_raw", "smart_15_raw", "smart_184_raw"], -"svm_1.pkl":["smart_201_raw", "smart_8_raw", "smart_200_raw", "smart_252_raw", "smart_251_raw", "smart_187_raw", "smart_9_raw", "smart_188_raw", "smart_15_raw", "smart_184_raw"], -"svm_112.pkl":["smart_220_raw", "smart_197_raw", "smart_10_raw", "smart_188_raw", "smart_12_raw", "smart_4_raw", "smart_196_raw", "smart_3_raw", "smart_240_raw", "smart_225_raw"], -"svm_138.pkl":["smart_183_raw", "smart_10_raw", "smart_191_raw", "smart_195_raw", "smart_223_raw", "smart_189_raw", "smart_187_raw", "smart_255_raw", "smart_226_raw", "smart_8_raw"], -"svm_229.pkl":["smart_224_raw", "smart_8_raw", "smart_192_raw", "smart_220_raw", "smart_195_raw", "smart_183_raw", "smart_250_raw", "smart_187_raw", "smart_225_raw", "smart_4_raw", "smart_252_raw"], -"svm_145.pkl":["smart_190_raw", "smart_8_raw", "smart_226_raw", "smart_184_raw", "smart_225_raw", "smart_220_raw", "smart_193_raw", "smart_183_raw", "smart_201_raw", "smart_187_raw", "smart_2_raw"], -"svm_59.pkl":["smart_188_raw", "smart_11_raw", "smart_184_raw", "smart_2_raw", "smart_220_raw", "smart_198_raw", "smart_225_raw", "smart_240_raw", "smart_197_raw", "smart_251_raw"], -"svm_204.pkl":["smart_15_raw", "smart_240_raw", "smart_225_raw", "smart_223_raw", "smart_252_raw", "smart_22_raw", "smart_200_raw", "smart_13_raw", "smart_220_raw", "smart_198_raw", "smart_191_raw"], -"svm_88.pkl":["smart_198_raw", "smart_3_raw", "smart_8_raw", "smart_225_raw", "smart_251_raw", "smart_222_raw", "smart_188_raw", "smart_10_raw", "smart_240_raw", "smart_189_raw"], -"svm_182.pkl":["smart_10_raw", "smart_190_raw", "smart_250_raw", "smart_15_raw", "smart_193_raw", "smart_22_raw", "smart_200_raw", "smart_8_raw", "smart_4_raw", "smart_187_raw", "smart_9_raw"], -"svm_61.pkl":["smart_5_raw", "smart_12_raw", "smart_9_raw", "smart_198_raw", "smart_195_raw", "smart_252_raw", "smart_15_raw", "smart_240_raw", "smart_255_raw", "smart_224_raw"], -"svm_50.pkl":["smart_220_raw", "smart_5_raw", "smart_194_raw", "smart_250_raw", "smart_15_raw", "smart_240_raw", "smart_8_raw", "smart_198_raw", "smart_224_raw", "smart_191_raw"], -"svm_210.pkl":["smart_8_raw", "smart_15_raw", "smart_195_raw", "smart_224_raw", 
"smart_5_raw", "smart_191_raw", "smart_198_raw", "smart_225_raw", "smart_200_raw", "smart_251_raw", "smart_240_raw"], -"svm_16.pkl":["smart_222_raw", "smart_10_raw", "smart_250_raw", "smart_189_raw", "smart_191_raw", "smart_2_raw", "smart_5_raw", "smart_193_raw", "smart_9_raw", "smart_187_raw"], -"svm_85.pkl":["smart_252_raw", "smart_184_raw", "smart_9_raw", "smart_5_raw", "smart_254_raw", "smart_3_raw", "smart_195_raw", "smart_10_raw", "smart_12_raw", "smart_222_raw"], -"svm_36.pkl":["smart_201_raw", "smart_251_raw", "smart_184_raw", "smart_3_raw", "smart_5_raw", "smart_183_raw", "smart_194_raw", "smart_195_raw", "smart_224_raw", "smart_2_raw"], -"svm_33.pkl":["smart_223_raw", "smart_254_raw", "smart_225_raw", "smart_9_raw", "smart_199_raw", "smart_5_raw", "smart_189_raw", "smart_194_raw", "smart_240_raw", "smart_4_raw"], -"svm_3.pkl":["smart_225_raw", "smart_194_raw", "smart_3_raw", "smart_189_raw", "smart_9_raw", "smart_254_raw", "smart_240_raw", "smart_5_raw", "smart_255_raw", "smart_223_raw"], -"svm_93.pkl":["smart_8_raw", "smart_188_raw", "smart_5_raw", "smart_10_raw", "smart_222_raw", "smart_2_raw", "smart_254_raw", "smart_12_raw", "smart_193_raw", "smart_224_raw"], -"svm_120.pkl":["smart_189_raw", "smart_224_raw", "smart_222_raw", "smart_193_raw", "smart_5_raw", "smart_201_raw", "smart_8_raw", "smart_254_raw", "smart_194_raw", "smart_22_raw"], -"svm_128.pkl":["smart_195_raw", "smart_184_raw", "smart_251_raw", "smart_8_raw", "smart_5_raw", "smart_196_raw", "smart_10_raw", "smart_4_raw", "smart_225_raw", "smart_191_raw"], -"svm_212.pkl":["smart_225_raw", "smart_192_raw", "smart_10_raw", "smart_12_raw", "smart_222_raw", "smart_184_raw", "smart_13_raw", "smart_226_raw", "smart_5_raw", "smart_201_raw", "smart_22_raw"], -"svm_221.pkl":["smart_255_raw", "smart_2_raw", "smart_224_raw", "smart_192_raw", "smart_252_raw", "smart_13_raw", "smart_183_raw", "smart_193_raw", "smart_15_raw", "smart_199_raw", "smart_200_raw"], -"svm_223.pkl":["smart_4_raw", "smart_194_raw", "smart_9_raw", "smart_255_raw", "smart_188_raw", "smart_201_raw", "smart_3_raw", "smart_226_raw", "smart_192_raw", "smart_251_raw", "smart_191_raw"], -"svm_44.pkl":["smart_255_raw", "smart_11_raw", "smart_200_raw", "smart_3_raw", "smart_195_raw", "smart_201_raw", "smart_4_raw", "smart_5_raw", "smart_10_raw", "smart_191_raw"], -"svm_213.pkl":["smart_22_raw", "smart_191_raw", "smart_183_raw", "smart_4_raw", "smart_194_raw", "smart_255_raw", "smart_254_raw", "smart_193_raw", "smart_11_raw", "smart_10_raw", "smart_220_raw"], -"svm_131.pkl":["smart_22_raw", "smart_194_raw", "smart_184_raw", "smart_250_raw", "smart_10_raw", "smart_189_raw", "smart_183_raw", "smart_240_raw", "smart_12_raw", "smart_252_raw"], -"svm_6.pkl":["smart_194_raw", "smart_250_raw", "smart_223_raw", "smart_224_raw", "smart_184_raw", "smart_191_raw", "smart_201_raw", "smart_9_raw", "smart_252_raw", "smart_3_raw"], -"svm_161.pkl":["smart_255_raw", "smart_222_raw", "smart_226_raw", "smart_254_raw", "smart_183_raw", "smart_22_raw", "smart_12_raw", "smart_190_raw", "smart_11_raw", "smart_192_raw", "smart_251_raw"], -"svm_72.pkl":["smart_13_raw", "smart_184_raw", "smart_223_raw", "smart_240_raw", "smart_250_raw", "smart_251_raw", "smart_201_raw", "smart_196_raw", "smart_5_raw", "smart_4_raw"], -"svm_27.pkl":["smart_189_raw", "smart_188_raw", "smart_255_raw", "smart_251_raw", "smart_240_raw", "smart_15_raw", "smart_9_raw", "smart_191_raw", "smart_226_raw", "smart_10_raw"], -"svm_141.pkl":["smart_9_raw", "smart_191_raw", "smart_2_raw", "smart_226_raw", "smart_13_raw", 
"smart_22_raw", "smart_193_raw", "smart_222_raw", "smart_220_raw", "smart_225_raw", "smart_3_raw"], -"svm_57.pkl":["smart_12_raw", "smart_252_raw", "smart_190_raw", "smart_226_raw", "smart_10_raw", "smart_189_raw", "smart_193_raw", "smart_2_raw", "smart_9_raw", "smart_223_raw"], -"svm_236.pkl":["smart_200_raw", "smart_189_raw", "smart_226_raw", "smart_252_raw", "smart_250_raw", "smart_193_raw", "smart_13_raw", "smart_2_raw", "smart_254_raw", "smart_22_raw", "smart_9_raww"], -"svm_208.pkl":["smart_223_raw", "smart_15_raw", "smart_251_raw", "smart_5_raw", "smart_198_raw", "smart_252_raw", "smart_4_raw", "smart_8_raw", "smart_220_raw", "smart_254_raw", "smart_193_raw"], -"svm_230.pkl":["smart_184_raw", "smart_5_raw", "smart_191_raw", "smart_198_raw", "smart_11_raw", "smart_255_raw", "smart_189_raw", "smart_254_raw", "smart_196_raw", "smart_199_raw", "smart_223_raw"], -"svm_134.pkl":["smart_8_raw", "smart_194_raw", "smart_4_raw", "smart_189_raw", "smart_223_raw", "smart_5_raw", "smart_187_raw", "smart_9_raw", "smart_192_raw", "smart_220_raw"], -"svm_71.pkl":["smart_220_raw", "smart_13_raw", "smart_194_raw", "smart_197_raw", "smart_192_raw", "smart_22_raw", "smart_184_raw", "smart_199_raw", "smart_222_raw", "smart_183_raw"], -"svm_109.pkl":["smart_224_raw", "smart_252_raw", "smart_2_raw", "smart_200_raw", "smart_5_raw", "smart_194_raw", "smart_222_raw", "smart_198_raw", "smart_4_raw", "smart_13_raw"] -} diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl deleted file mode 100644 index 5eb30f300e5..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl deleted file mode 100644 index 9259c1e7433..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl deleted file mode 100644 index d5d5cf5b7bc..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl deleted file mode 100644 index 4aadc3cfbf1..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl deleted file mode 100644 index c99c353be31..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl deleted file mode 100644 index 367a3304aa3..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl deleted file mode 100644 index 946d5cef1ba..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl deleted file mode 
100644 index ff834929ec6..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl deleted file mode 100644 index eec8689ea63..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl deleted file mode 100644 index 6a26c050267..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl deleted file mode 100644 index 5cbe9775a15..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl deleted file mode 100644 index d2041c267a4..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl deleted file mode 100644 index 0ab6187e99d..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl deleted file mode 100644 index 8f9ea4ec7c8..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl deleted file mode 100644 index 4d49900f932..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl deleted file mode 100644 index 6a18726de2f..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl deleted file mode 100644 index e6a55dcaece..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl deleted file mode 100644 index 51171e00c59..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl deleted file mode 100644 index bc98e0c7255..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl deleted file mode 100644 index c4547dc6394..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl and /dev/null differ diff --git 
a/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl deleted file mode 100644 index 86d9f38de31..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl deleted file mode 100644 index 24ff9623103..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl deleted file mode 100644 index 92bfd3f1b86..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl deleted file mode 100644 index 11664b3dd01..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl deleted file mode 100644 index 2d421685e6f..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl deleted file mode 100644 index 12a811cfab5..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl deleted file mode 100644 index 0c51446c689..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl deleted file mode 100644 index d2945ce9f9a..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl deleted file mode 100644 index d05520ccd87..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl deleted file mode 100644 index 7fcfb3cbdec..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl deleted file mode 100644 index 785301c1796..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl deleted file mode 100644 index 4ea83da7773..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl deleted file mode 100644 index 
12273f7ce7e..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl deleted file mode 100644 index c866cf00e63..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl deleted file mode 100644 index 8cf1c3aa28e..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl deleted file mode 100644 index cba64e80049..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl deleted file mode 100644 index ba0df0abdbd..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl deleted file mode 100644 index 6b5bee219e3..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl deleted file mode 100644 index 11eafc64c56..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl deleted file mode 100644 index 0b8475c581c..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl deleted file mode 100644 index 4a248c14ca0..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl deleted file mode 100644 index e37c6b4fb3d..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl deleted file mode 100644 index e54303863e0..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl deleted file mode 100644 index 8b208f4e823..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl deleted file mode 100644 index 3f2b62984af..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl and /dev/null differ diff --git 
a/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl deleted file mode 100644 index 5e4fb56f4b7..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl deleted file mode 100644 index 1e9c33599ed..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl deleted file mode 100644 index 36f8205cead..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl deleted file mode 100644 index 199f9ba5110..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl deleted file mode 100644 index d986526eca2..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl deleted file mode 100644 index 160e22fae38..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl deleted file mode 100644 index 8d98572acce..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl deleted file mode 100644 index 4fca95e1da0..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl deleted file mode 100644 index 011974ed1b9..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl deleted file mode 100644 index e5e97a8888b..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl deleted file mode 100644 index e709d7b46e5..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl deleted file mode 100644 index 3d87b8bd904..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl deleted file mode 100644 index 
9abcece9239..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl deleted file mode 100644 index b7ce5eda94a..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl deleted file mode 100644 index fe7832894bb..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl deleted file mode 100644 index 76217777be8..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl deleted file mode 100644 index 4fb09d37464..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl deleted file mode 100644 index 319fc5f457b..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl deleted file mode 100644 index 25b21aed63b..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl deleted file mode 100644 index 1e6e7383a61..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl deleted file mode 100644 index 22d349a7c5e..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl deleted file mode 100644 index e0760add925..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl deleted file mode 100644 index 5096aa8e466..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl deleted file mode 100644 index 7958f3b6c25..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl deleted file mode 100644 index 2ed3a0fe911..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl and /dev/null differ diff --git 
a/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl deleted file mode 100644 index 2e1884094b7..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl deleted file mode 100644 index 88161af56fa..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl deleted file mode 100644 index 715633982ce..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl deleted file mode 100644 index 703429fe3c8..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl deleted file mode 100644 index 9653d20f397..00000000000 Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl and /dev/null differ diff --git a/src/pybind/mgr/diskprediction/requirements.txt b/src/pybind/mgr/diskprediction/requirements.txt deleted file mode 100644 index 3a22a072eba..00000000000 --- a/src/pybind/mgr/diskprediction/requirements.txt +++ /dev/null @@ -1,14 +0,0 @@ -google==2.0.1 -google-api-python-client==1.7.3 -google-auth==1.5.0 -google-auth-httplib2==0.0.3 -google-gax==0.12.5 -googleapis-common-protos==1.5.3 -grpc==0.3.post19 -grpc-google-logging-v2==0.8.1 -grpc-google-pubsub-v1==0.8.1 -grpcio==1.14.1 -mock==2.0.0 -numpy==1.15.1 -scikit-learn==0.19.2 -scipy==1.1.0 diff --git a/src/pybind/mgr/diskprediction/task.py b/src/pybind/mgr/diskprediction/task.py deleted file mode 100644 index 914ea59270d..00000000000 --- a/src/pybind/mgr/diskprediction/task.py +++ /dev/null @@ -1,157 +0,0 @@ -from __future__ import absolute_import - -import time -from threading import Event, Thread - -from .agent.metrics.ceph_cluster import CephClusterAgent -from .agent.metrics.ceph_mon_osd import CephMonOsdAgent -from .agent.metrics.ceph_pool import CephPoolAgent -from .agent.metrics.db_relay import DBRelayAgent -from .agent.metrics.sai_agent import SAIAgent -from .agent.metrics.sai_cluster import SAICluserAgent -from .agent.metrics.sai_disk import SAIDiskAgent -from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent -from .agent.metrics.sai_host import SAIHostAgent -from .agent.predict.prediction import PredictionAgent -from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING - - -class AgentRunner(Thread): - - task_name = '' - interval_key = '' - agents = [] - - def __init__(self, mgr_module, agent_timeout=60): - """ - - :param mgr_module: parent ceph mgr module - :param agent_timeout: (unit seconds) agent execute timeout value, default: 60 secs - """ - Thread.__init__(self) - self._agent_timeout = agent_timeout - self._module_inst = mgr_module - self._log = mgr_module.log - self._obj_sender = None - self._start_time = None - self._th = None - - self.exit = False - self.event = Event() - self.task_interval = \ - int(self._module_inst.get_configuration(self.interval_key)) - - def terminate(self): - self.exit = True - self.event.set() - 
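Each runner in the deleted task.py pairs a worker thread with a `threading.Event` so the interval sleep stays interruptible: `terminate()` and the `device debug *-forced` commands both call `event.set()` to wake the loop. A stripped-down sketch of that pattern, independent of the mgr module:

    import time
    from threading import Event, Thread

    class PeriodicRunner(Thread):
        """Minimal sketch of the AgentRunner wake-up pattern."""

        def __init__(self, interval, work):
            Thread.__init__(self)
            self.exit = False
            self.event = Event()
            self.interval = interval
            self.work = work

        def terminate(self):
            self.exit = True
            self.event.set()  # interrupt the wait immediately

        def run(self):
            while not self.exit:
                self.work()
                # wait() returns early if event.set() is called by a forced run.
                self.event.wait(self.interval)
                self.event.clear()

    runner = PeriodicRunner(600, lambda: print('collect metrics'))
    runner.start()
    runner.event.set()  # force an extra run now, as the debug commands do
    time.sleep(1)
    runner.terminate()
    runner.join(5)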
self._log.info('PDS terminate %s complete' % self.task_name) - - def run(self): - self._start_time = time.time() - self._log.debug( - 'start %s, interval: %s' - % (self.task_name, self.task_interval)) - while not self.exit: - self.run_agents() - if self.event: - self.event.wait(int(self.task_interval)) - self.event.clear() - self._log.info( - 'completed %s(%s)' % (self.task_name, time.time()-self._start_time)) - - def run_agents(self): - try: - self._log.debug('run_agents %s' % self.task_name) - model = self._module_inst.get_configuration('diskprediction_config_mode') - if model.lower() == 'cloud': - # from .common.restapiclient import RestApiClient, gen_configuration - from .common.grpcclient import GRPcClient, gen_configuration - conf = gen_configuration( - host=self._module_inst.get_configuration('diskprediction_server'), - user=self._module_inst.get_configuration('diskprediction_user'), - password=self._module_inst.get_configuration( - 'diskprediction_password'), - port=self._module_inst.get_configuration('diskprediction_port'), - cert_context=self._module_inst.get_configuration('diskprediction_cert_context'), - mgr_inst=self._module_inst, - ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'), - default_authority=self._module_inst.get_configuration('diskprediction_default_authority')) - self._obj_sender = GRPcClient(conf) - else: - from .common.localpredictor import LocalPredictor, gen_configuration - conf = gen_configuration(mgr_inst=self._module_inst) - self._obj_sender = LocalPredictor(conf) - if not self._obj_sender: - self._log.error('invalid diskprediction sender') - self._module_inst.status = \ - {'status': DP_MGR_STAT_FAILED, - 'reason': 'invalid diskprediction sender'} - return - if self._obj_sender.test_connection(): - self._module_inst.status = {'status': DP_MGR_STAT_OK} - self._log.debug('succeed to test connection') - self._run() - else: - self._log.error('failed to test connection') - self._module_inst.status = \ - {'status': DP_MGR_STAT_FAILED, - 'reason': 'failed to test connection'} - except Exception as e: - self._module_inst.status = \ - {'status': DP_MGR_STAT_FAILED, - 'reason': 'failed to start %s agents, %s' - % (self.task_name, str(e))} - self._log.error( - 'failed to start %s agents, %s' % (self.task_name, str(e))) - - def _run(self): - self._log.debug('%s run' % self.task_name) - for agent in self.agents: - retry_count = 3 - while retry_count: - retry_count -= 1 - try: - obj_agent = agent( - self._module_inst, self._obj_sender, - self._agent_timeout) - obj_agent.run() - break - except Exception as e: - if str(e).find('configuring') >= 0: - self._log.debug( - 'failed to execute {}, {}, retry again.'.format( - agent.measurement, str(e))) - time.sleep(1) - continue - else: - self._module_inst.status = \ - {'status': DP_MGR_STAT_WARNING, - 'reason': 'failed to execute {}, {}'.format( - agent.measurement, ';'.join(str(e).split('\n\t')))} - self._log.warning( - 'failed to execute {}, {}'.format( - agent.measurement, ';'.join(str(e).split('\n\t')))) - break - - -class MetricsRunner(AgentRunner): - - task_name = 'Metrics Agent' - interval_key = 'diskprediction_upload_metrics_interval' - agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent, - SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent, - SAIAgent] - - -class PredictionRunner(AgentRunner): - - task_name = 'Prediction Agent' - interval_key = 'diskprediction_retrieve_prediction_interval' - agents = [PredictionAgent] - - -class SmartRunner(AgentRunner): - - 
task_name = 'Smart data Agent' - interval_key = 'diskprediction_upload_smart_interval' - agents = [SAIDiskSmartAgent] diff --git a/src/pybind/mgr/diskprediction/test/__init__.py b/src/pybind/mgr/diskprediction/test/__init__.py deleted file mode 100644 index 1f19be57566..00000000000 --- a/src/pybind/mgr/diskprediction/test/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 diff --git a/src/pybind/mgr/diskprediction/test/test_agents.py b/src/pybind/mgr/diskprediction/test/test_agents.py deleted file mode 100644 index 55999db1994..00000000000 --- a/src/pybind/mgr/diskprediction/test/test_agents.py +++ /dev/null @@ -1,49 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -import time -import mock - -from ..agent.metrics.ceph_cluster import CephClusterAgent -from ..agent.metrics.ceph_mon_osd import CephMonOsdAgent -from ..agent.metrics.ceph_pool import CephPoolAgent -from ..agent.metrics.db_relay import DBRelayAgent -from ..agent.metrics.sai_agent import SAIAgent -from ..agent.metrics.sai_cluster import SAICluserAgent -from ..agent.metrics.sai_disk import SAIDiskAgent -from ..agent.metrics.sai_disk_smart import SAIDiskSmartAgent -from ..agent.metrics.sai_host import SAIHostAgent -from ..agent.predict.prediction import PredictionAgent -from ..common import DummyResonse - -TEMP_RESPONSE = { - "disk_domain_id": 'abc', - "near_failure": 'Good', - "predicted": int(time.time() * (1000 ** 3))} - -def generate_sender_mock(): - sender_mock = mock.MagicMock() - sender = sender_mock - status_info = dict() - status_info['measurement'] = None - status_info['success_count'] = 1 - status_info['failure_count'] = 0 - sender_mock.send_info.return_value = status_info - - query_value = DummyResonse() - query_value.status_code = 200 - query_value.resp_json = TEMP_RESPONSE - sender_mock.query_info.return_value = query_value - return sender - - -def test_agents(mgr_inst, sender=None): - if sender is None: - sender = generate_sender_mock() - - metrics_agents = \ - [CephClusterAgent, CephMonOsdAgent, CephPoolAgent, DBRelayAgent, - SAIAgent, SAICluserAgent, SAIDiskAgent, SAIDiskSmartAgent, - SAIHostAgent, PredictionAgent] - for agent in metrics_agents: - obj_agent = agent(mgr_inst, sender) - obj_agent.run() diff --git a/src/pybind/mgr/diskprediction_cloud/__init__.py b/src/pybind/mgr/diskprediction_cloud/__init__.py new file mode 100644 index 00000000000..8f210ac9247 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/__init__.py @@ -0,0 +1 @@ +from .module import Module diff --git a/src/pybind/mgr/diskprediction_cloud/agent/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py new file mode 100644 index 00000000000..c7702e5242c --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py @@ -0,0 +1,38 @@ +from __future__ import absolute_import + +from ..common import timeout, TimeoutError + + +class BaseAgent(object): + + measurement = '' + + def __init__(self, mgr_module, obj_sender, timeout=30): + self.data = [] + self._client = None + self._client = obj_sender + self._logger = mgr_module.log + self._module_inst = mgr_module + self._timeout = timeout + + def run(self): + try: + self._collect_data() + self._run() + except TimeoutError: + self._logger.error('{} failed to execute {} task'.format( + __name__, self.measurement)) + + def __nonzero__(self): + if not self._module_inst: + return False + else: + return True + + @timeout() + def _run(self): + pass + + @timeout() + def _collect_data(self): + pass diff --git 
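`BaseAgent` guards both `_run()` and `_collect_data()` with the `timeout` decorator from `..common`, whose implementation is not part of this patch; `run()` then catches the `TimeoutError` it raises. One plausible, SIGALRM-based sketch of a decorator with the same contract (a 30-second default budget is assumed; Unix, main thread only):

    import signal

    class TimeoutError(Exception):
        pass

    def timeout(seconds=30):
        def decorator(func):
            def wrapper(*args, **kwargs):
                def _on_alarm(signum, frame):
                    raise TimeoutError('%s timed out after %ss'
                                       % (func.__name__, seconds))
                old_handler = signal.signal(signal.SIGALRM, _on_alarm)
                signal.alarm(seconds)
                try:
                    return func(*args, **kwargs)
                finally:
                    signal.alarm(0)  # cancel the pending alarm
                    signal.signal(signal.SIGALRM, old_handler)
            return wrapper
        return decorator

    @timeout(5)
    def slow_collect():
        import time
        time.sleep(10)  # raises TimeoutError after 5 seconds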
a/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py new file mode 100644 index 00000000000..9e7e5b0ba53 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py @@ -0,0 +1,61 @@ +from __future__ import absolute_import + +from .. import BaseAgent +from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK + +AGENT_VERSION = '1.0.0' + + +class MetricsField(object): + def __init__(self): + self.tags = {} + self.fields = {} + self.timestamp = None + + def __str__(self): + return str({ + 'tags': self.tags, + 'fields': self.fields, + 'timestamp': self.timestamp + }) + + +class MetricsAgent(BaseAgent): + + def log_summary(self, status_info): + try: + if status_info: + measurement = status_info['measurement'] + success_count = status_info['success_count'] + failure_count = status_info['failure_count'] + total_count = success_count + failure_count + display_string = \ + '%s agent stats in total count: %s, success count: %s, failure count: %s.' + self._logger.info( + display_string % (measurement, total_count, success_count, failure_count) + ) + except Exception as e: + self._logger.error(str(e)) + + def _run(self): + collect_data = self.data + result = {} + if collect_data and self._client: + status_info = self._client.send_info(collect_data, self.measurement) + # show summary info + self.log_summary(status_info) + # write sub_agent buffer + total_count = status_info['success_count'] + status_info['failure_count'] + if total_count: + if status_info['success_count'] == 0: + self._module_inst.status = \ + {'status': DP_MGR_STAT_FAILED, + 'reason': 'failed to send metrics data to the server'} + elif status_info['failure_count'] == 0: + self._module_inst.status = \ + {'status': DP_MGR_STAT_OK} + else: + self._module_inst.status = \ + {'status': DP_MGR_STAT_WARNING, + 'reason': 'failed to send partial metrics data to the server'} + return result diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py new file mode 100644 index 00000000000..4477802a0ba --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py @@ -0,0 +1,146 @@ +from __future__ import absolute_import + +import socket + +from . 
import MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class CephCluster(MetricsField): + """ Ceph cluster structure """ + measurement = 'ceph_cluster' + + def __init__(self): + super(CephCluster, self).__init__() + self.tags['cluster_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.fields['cluster_health'] = '' + self.fields['num_mon'] = None + self.fields['num_mon_quorum'] = None + self.fields['num_osd'] = None + self.fields['num_osd_up'] = None + self.fields['num_osd_in'] = None + self.fields['osd_epoch'] = None + self.fields['osd_bytes'] = None + self.fields['osd_bytes_used'] = None + self.fields['osd_bytes_avail'] = None + self.fields['num_pool'] = None + self.fields['num_pg'] = None + self.fields['num_pg_active_clean'] = None + self.fields['num_pg_active'] = None + self.fields['num_pg_peering'] = None + self.fields['num_object'] = None + self.fields['num_object_degraded'] = None + self.fields['num_object_misplaced'] = None + self.fields['num_object_unfound'] = None + self.fields['num_bytes'] = None + self.fields['num_mds_up'] = None + self.fields['num_mds_in'] = None + self.fields['num_mds_failed'] = None + self.fields['mds_epoch'] = None + + +class CephClusterAgent(MetricsAgent): + measurement = 'ceph_cluster' + + def _collect_data(self): + # process data and save to 'self.data' + obj_api = ClusterAPI(self._module_inst) + cluster_id = obj_api.get_cluster_id() + + c_data = CephCluster() + cluster_state = obj_api.get_health_status() + c_data.tags['cluster_id'] = cluster_id + c_data.fields['cluster_health'] = str(cluster_state) + c_data.fields['agenthost'] = socket.gethostname() + c_data.tags['agenthost_domain_id'] = \ + '%s_%s' % (cluster_id, c_data.fields['agenthost']) + c_data.fields['osd_epoch'] = obj_api.get_osd_epoch() + c_data.fields['num_mon'] = len(obj_api.get_mons()) + c_data.fields['num_mon_quorum'] = \ + len(obj_api.get_mon_status().get('quorum', [])) + + osds = obj_api.get_osds() + num_osd_up = 0 + num_osd_in = 0 + for osd_data in osds: + if osd_data.get('up'): + num_osd_up = num_osd_up + 1 + if osd_data.get('in'): + num_osd_in = num_osd_in + 1 + if osds: + c_data.fields['num_osd'] = len(osds) + else: + c_data.fields['num_osd'] = 0 + c_data.fields['num_osd_up'] = num_osd_up + c_data.fields['num_osd_in'] = num_osd_in + c_data.fields['num_pool'] = len(obj_api.get_osd_pools()) + + df_stats = obj_api.module.get('df').get('stats', {}) + total_bytes = df_stats.get('total_bytes', 0) + total_used_bytes = df_stats.get('total_used_bytes', 0) + total_avail_bytes = df_stats.get('total_avail_bytes', 0) + c_data.fields['osd_bytes'] = total_bytes + c_data.fields['osd_bytes_used'] = total_used_bytes + c_data.fields['osd_bytes_avail'] = total_avail_bytes + if total_bytes and total_avail_bytes: + c_data.fields['osd_bytes_used_percentage'] = \ + round(float(total_used_bytes) / float(total_bytes) * 100, 4) + else: + c_data.fields['osd_bytes_used_percentage'] = 0.0000 + + pg_stats = obj_api.module.get('pg_dump').get('pg_stats', []) + num_bytes = 0 + num_object = 0 + num_object_degraded = 0 + num_object_misplaced = 0 + num_object_unfound = 0 + num_pg_active = 0 + num_pg_active_clean = 0 + num_pg_peering = 0 + for pg_data in pg_stats: + num_pg_active = num_pg_active + len(pg_data.get('acting')) + if 'active+clean' in pg_data.get('state'): + num_pg_active_clean = num_pg_active_clean + 1 + if 'peering' in pg_data.get('state'): + num_pg_peering = num_pg_peering + 1 + + stat_sum = pg_data.get('stat_sum', {}) + num_object = 
num_object + stat_sum.get('num_objects', 0) + num_object_degraded = \ + num_object_degraded + stat_sum.get('num_objects_degraded', 0) + num_object_misplaced = \ + num_object_misplaced + stat_sum.get('num_objects_misplaced', 0) + num_object_unfound = \ + num_object_unfound + stat_sum.get('num_objects_unfound', 0) + num_bytes = num_bytes + stat_sum.get('num_bytes', 0) + + c_data.fields['num_pg'] = len(pg_stats) + c_data.fields['num_object'] = num_object + c_data.fields['num_object_degraded'] = num_object_degraded + c_data.fields['num_object_misplaced'] = num_object_misplaced + c_data.fields['num_object_unfound'] = num_object_unfound + c_data.fields['num_bytes'] = num_bytes + c_data.fields['num_pg_active'] = num_pg_active + c_data.fields['num_pg_active_clean'] = num_pg_active_clean + c_data.fields['num_pg_peering'] = num_pg_peering + + filesystems = obj_api.get_file_systems() + num_mds_in = 0 + num_mds_up = 0 + num_mds_failed = 0 + mds_epoch = 0 + for fs_data in filesystems: + num_mds_in = \ + num_mds_in + len(fs_data.get('mdsmap', {}).get('in', [])) + num_mds_up = \ + num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {})) + num_mds_failed = \ + num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', [])) + mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0) + c_data.fields['num_mds_in'] = num_mds_in + c_data.fields['num_mds_up'] = num_mds_up + c_data.fields['num_mds_failed'] = num_mds_failed + c_data.fields['mds_epoch'] = mds_epoch + self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py new file mode 100644 index 00000000000..138618695ec --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py @@ -0,0 +1,224 @@ +from __future__ import absolute_import + +import socket + +from . 
import MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class CephMON(MetricsField): + """ Ceph monitor structure """ + measurement = 'ceph_mon' + + def __init__(self): + super(CephMON, self).__init__() + self.tags['cluster_id'] = None + self.tags['mon_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.fields['num_sessions'] = None + self.fields['session_add'] = None + self.fields['session_rm'] = None + self.fields['session_trim'] = None + self.fields['num_elections'] = None + self.fields['election_call'] = None + self.fields['election_win'] = None + self.fields['election_lose'] = None + + +class CephErasureProfile(MetricsField): + """ Ceph osd erasure profile """ + measurement = 'ceph_erasure_profile' + + def __init__(self): + super(CephErasureProfile, self).__init__() + self.tags['cluster_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['host_domain_id'] = None + self.fields['name'] = None + + +class CephOsdTree(MetricsField): + """ Ceph osd tree map """ + measurement = 'ceph_osd_tree' + + def __init__(self): + super(CephOsdTree, self).__init__() + self.tags['cluster_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['host_domain_id'] = None + self.fields['name'] = None + + +class CephOSD(MetricsField): + """ Ceph osd structure """ + measurement = 'ceph_osd' + + def __init__(self): + super(CephOSD, self).__init__() + self.tags['cluster_id'] = None + self.tags['osd_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['host_domain_id'] = None + self.fields['op_w'] = None + self.fields['op_in_bytes'] = None + self.fields['op_r'] = None + self.fields['op_out_bytes'] = None + self.fields['op_wip'] = None + self.fields['op_latency'] = None + self.fields['op_process_latency'] = None + self.fields['op_r_latency'] = None + self.fields['op_r_process_latency'] = None + self.fields['op_w_in_bytes'] = None + self.fields['op_w_latency'] = None + self.fields['op_w_process_latency'] = None + self.fields['op_w_prepare_latency'] = None + self.fields['op_rw'] = None + self.fields['op_rw_in_bytes'] = None + self.fields['op_rw_out_bytes'] = None + self.fields['op_rw_latency'] = None + self.fields['op_rw_process_latency'] = None + self.fields['op_rw_prepare_latency'] = None + self.fields['op_before_queue_op_lat'] = None + self.fields['op_before_dequeue_op_lat'] = None + + +class CephMonOsdAgent(MetricsAgent): + measurement = 'ceph_mon_osd' + + # counter types + PERFCOUNTER_LONGRUNAVG = 4 + PERFCOUNTER_COUNTER = 8 + PERFCOUNTER_HISTOGRAM = 0x10 + PERFCOUNTER_TYPE_MASK = ~3 + + def _stattype_to_str(self, stattype): + typeonly = stattype & self.PERFCOUNTER_TYPE_MASK + if typeonly == 0: + return 'gauge' + if typeonly == self.PERFCOUNTER_LONGRUNAVG: + # this lie matches the DaemonState decoding: only val, no counts + return 'counter' + if typeonly == self.PERFCOUNTER_COUNTER: + return 'counter' + if typeonly == self.PERFCOUNTER_HISTOGRAM: + return 'histogram' + return '' + + def _generage_osd_erasure_profile(self, cluster_id): + obj_api = ClusterAPI(self._module_inst) + osd_map = obj_api.module.get('osd_map') + if osd_map: + for n, n_value in osd_map.get('erasure_code_profiles', {}).items(): + e_osd = CephErasureProfile() + e_osd.fields['name'] = n + e_osd.tags['cluster_id'] = cluster_id + e_osd.fields['agenthost'] = socket.gethostname() + e_osd.tags['agenthost_domain_id'] = '%s_%s' % (cluster_id, 
socket.gethostname()) + e_osd.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname()) + for k in n_value.keys(): + e_osd.fields[k] = str(n_value[k]) + self.data.append(e_osd) + + def _generate_osd_tree(self, cluster_id): + obj_api = ClusterAPI(self._module_inst) + osd_tree = obj_api.module.get('osd_map_tree') + if osd_tree: + for node in osd_tree.get('nodes', []): + n_node = CephOsdTree() + n_node.tags['cluster_id'] = cluster_id + n_node.fields['agenthost'] = socket.gethostname() + n_node.tags['agenthost_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname()) + n_node.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname()) + n_node.fields['children'] = ','.join(str(x) for x in node.get('children', [])) + n_node.fields['type_id'] = str(node.get('type_id', '')) + n_node.fields['id'] = str(node.get('id', '')) + n_node.fields['name'] = str(node.get('name', '')) + n_node.fields['type'] = str(node.get('type', '')) + n_node.fields['reweight'] = float(node.get('reweight', 0.0)) + n_node.fields['crush_weight'] = float(node.get('crush_weight', 0.0)) + n_node.fields['primary_affinity'] = float(node.get('primary_affinity', 0.0)) + n_node.fields['device_class'] = str(node.get('device_class', '')) + self.data.append(n_node) + + def _generate_osd(self, cluster_id, service_name, perf_counts): + obj_api = ClusterAPI(self._module_inst) + service_id = service_name[4:] + d_osd = CephOSD() + stat_bytes = 0 + stat_bytes_used = 0 + d_osd.tags['cluster_id'] = cluster_id + d_osd.tags['osd_id'] = service_name[4:] + d_osd.fields['agenthost'] = socket.gethostname() + d_osd.tags['agenthost_domain_id'] = \ + '%s_%s' % (cluster_id, d_osd.fields['agenthost']) + d_osd.tags['host_domain_id'] = \ + '%s_%s' % (cluster_id, + obj_api.get_osd_hostname(d_osd.tags['osd_id'])) + + for i_key, i_val in perf_counts.items(): + if i_key[:4] == 'osd.': + key_name = i_key[4:] + else: + key_name = i_key + if self._stattype_to_str(i_val['type']) == 'counter': + value = obj_api.get_rate('osd', service_id, i_key) + else: + value = obj_api.get_latest('osd', service_id, i_key) + if key_name == 'stat_bytes': + stat_bytes = value + elif key_name == 'stat_bytes_used': + stat_bytes_used = value + else: + d_osd.fields[key_name] = value + + if stat_bytes and stat_bytes_used: + d_osd.fields['stat_bytes_used_percentage'] = \ + round(float(stat_bytes_used) / float(stat_bytes) * 100, 4) + else: + d_osd.fields['stat_bytes_used_percentage'] = 0.0000 + self.data.append(d_osd) + + def _generate_mon(self, cluster_id, service_name, perf_counts): + d_mon = CephMON() + d_mon.tags['cluster_id'] = cluster_id + d_mon.tags['mon_id'] = service_name[4:] + d_mon.fields['agenthost'] = socket.gethostname() + d_mon.tags['agenthost_domain_id'] = \ + '%s_%s' % (cluster_id, d_mon.fields['agenthost']) + d_mon.fields['num_sessions'] = \ + perf_counts.get('mon.num_sessions', {}).get('value', 0) + d_mon.fields['session_add'] = \ + perf_counts.get('mon.session_add', {}).get('value', 0) + d_mon.fields['session_rm'] = \ + perf_counts.get('mon.session_rm', {}).get('value', 0) + d_mon.fields['session_trim'] = \ + perf_counts.get('mon.session_trim', {}).get('value', 0) + d_mon.fields['num_elections'] = \ + perf_counts.get('mon.num_elections', {}).get('value', 0) + d_mon.fields['election_call'] = \ + perf_counts.get('mon.election_call', {}).get('value', 0) + d_mon.fields['election_win'] = \ + perf_counts.get('mon.election_win', {}).get('value', 0) + d_mon.fields['election_lose'] = \ + perf_counts.get('mon.election_lose', {}).get('value', 0) + self.data.append(d_mon) 
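+ # NOTE: get_all_perf_counters() keys its entries by daemon name, e.g.
+ # 'mon.a' or 'osd.0'. _collect_data() below routes each entry to the
+ # matching parser via the three-letter prefix, and the parsers read
+ # the daemon id from the text after the dot (service_name[4:]).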
+ + def _collect_data(self): + # process data and save to 'self.data' + obj_api = ClusterAPI(self._module_inst) + perf_data = obj_api.module.get_all_perf_counters() + if not perf_data or not isinstance(perf_data, dict): + self._logger.error('unable to get all perf counters') + return + cluster_id = obj_api.get_cluster_id() + for n_name, i_perf in perf_data.items(): + if n_name[0:3].lower() == 'mon': + self._generate_mon(cluster_id, n_name, i_perf) + elif n_name[0:3].lower() == 'osd': + self._generate_osd(cluster_id, n_name, i_perf) + self._generage_osd_erasure_profile(cluster_id) + self._generate_osd_tree(cluster_id) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py new file mode 100644 index 00000000000..43e379ba5f8 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py @@ -0,0 +1,58 @@ +from __future__ import absolute_import + +import socket + +from . import MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class CephPool(MetricsField): + """ Ceph pool structure """ + measurement = 'ceph_pool' + + def __init__(self): + super(CephPool, self).__init__() + self.tags['cluster_id'] = None + self.tags['pool_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.fields['bytes_used'] = None + self.fields['max_avail'] = None + self.fields['objects'] = None + self.fields['wr_bytes'] = None + self.fields['dirty'] = None + self.fields['rd_bytes'] = None + self.fields['raw_bytes_used'] = None + + +class CephPoolAgent(MetricsAgent): + measurement = 'ceph_pool' + + def _collect_data(self): + # process data and save to 'self.data' + obj_api = ClusterAPI(self._module_inst) + df_data = obj_api.module.get('df') + cluster_id = obj_api.get_cluster_id() + for pool in df_data.get('pools', []): + d_pool = CephPool() + p_id = pool.get('id') + d_pool.tags['cluster_id'] = cluster_id + d_pool.tags['pool_id'] = p_id + d_pool.fields['agenthost'] = socket.gethostname() + d_pool.tags['agenthost_domain_id'] = \ + '%s_%s' % (cluster_id, d_pool.fields['agenthost']) + d_pool.fields['bytes_used'] = \ + pool.get('stats', {}).get('bytes_used', 0) + d_pool.fields['max_avail'] = \ + pool.get('stats', {}).get('max_avail', 0) + d_pool.fields['objects'] = \ + pool.get('stats', {}).get('objects', 0) + d_pool.fields['wr_bytes'] = \ + pool.get('stats', {}).get('wr_bytes', 0) + d_pool.fields['dirty'] = \ + pool.get('stats', {}).get('dirty', 0) + d_pool.fields['rd_bytes'] = \ + pool.get('stats', {}).get('rd_bytes', 0) + d_pool.fields['raw_bytes_used'] = \ + pool.get('stats', {}).get('raw_bytes_used', 0) + self.data.append(d_pool) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py new file mode 100644 index 00000000000..ed4dad35e34 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py @@ -0,0 +1,713 @@ +from __future__ import absolute_import + +import re +import socket + +from . 
import MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI +from ...common.cypher import CypherOP, NodeInfo + + +class BaseDP(object): + """ basic diskprediction structure """ + _fields = [] + + def __init__(self, *args, **kwargs): + if len(args) > len(self._fields): + raise TypeError('Expected {} arguments'.format(len(self._fields))) + + for name, value in zip(self._fields, args): + setattr(self, name, value) + + for name in self._fields[len(args):]: + setattr(self, name, kwargs.pop(name)) + + if kwargs: + raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs))) + + +class MGRDpCeph(BaseDP): + _fields = [ + 'fsid', 'health', 'max_osd', 'size', + 'avail_size', 'raw_used', 'raw_used_percent' + ] + + +class MGRDpHost(BaseDP): + _fields = ['fsid', 'host', 'ipaddr'] + + +class MGRDpMon(BaseDP): + _fields = ['fsid', 'host', 'ipaddr'] + + +class MGRDpOsd(BaseDP): + _fields = [ + 'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr', + 'cluster_addr', 'state', 'ceph_release', 'osd_devices', 'rotational' + ] + + +class MGRDpMds(BaseDP): + _fields = ['fsid', 'host', 'ipaddr'] + + +class MGRDpPool(BaseDP): + _fields = [ + 'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size', + 'pg_num', 'pgp_num', 'created_time', 'pgids', 'osd_ids', 'tiers', 'cache_mode', + 'erasure_code_profile', 'tier_of' + ] + + +class MGRDpRBD(BaseDP): + _fields = ['fsid', '_id', 'name', 'pool_name', 'pool_id'] + + +class MGRDpFS(BaseDP): + _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes'] + + +class MGRDpPG(BaseDP): + _fields = [ + 'fsid', 'pgid', 'up_osds', 'acting_osds', 'state', + 'objects', 'degraded', 'misplaced', 'unfound' + ] + + +class MGRDpDisk(BaseDP): + _fields = ['host_domain_id', 'host', 'fs_journal_osd', 'bs_db_osd', 'bs_wal_osd', 'data_osd', 'osd_ids'] + + +class DBRelay(MetricsField): + """ DB Relay structure """ + measurement = 'db_relay' + + def __init__(self): + super(DBRelay, self).__init__() + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['dc_tag'] = 'na' + self.tags['host'] = None + self.fields['cmd'] = None + + +class DBRelayAgent(MetricsAgent): + measurement = 'db_relay' + + def __init__(self, *args, **kwargs): + super(DBRelayAgent, self).__init__(*args, **kwargs) + self._cluster_node = None + self._cluster_id = None + self._ceph = ClusterAPI(self._module_inst) + self._osd_maps = self._ceph.module.get('osd_map') + self._mon_maps = self._ceph.module.get('mon_map') + self._fs_maps = self._ceph.module.get('fs_map') + self._osd_metadata = self._ceph.module.get('osd_metadata') + self._host_nodes = dict() + self._osd_nodes = dict() + self._mon_nodes = dict() + self._mds_nodes = dict() + self._dev_nodes = dict() + self._pool_nodes = dict() + self._rbd_nodes = dict() + self._fs_nodes = dict() + # initial ceph all node states + self._init_cluster_node() + self._init_hosts() + self._init_mons() + self._init_mds() + self._init_osds() + self._init_devices() + self._init_pools() + self._init_rbds() + self._init_fs() + + def _init_hosts(self): + hosts = set() + # Add host from osd + osd_data = self._osd_maps.get('osds', []) + for _data in osd_data: + osd_id = _data['osd'] + if not _data.get('in'): + continue + osd_addr = _data['public_addr'].split(':')[0] + osd_metadata = self._ceph.get_osd_metadata(osd_id) + if osd_metadata: + osd_host = osd_metadata['hostname'] + hosts.add((osd_host, osd_addr)) + + # Add host from mon + mons = self._mon_maps.get('mons', []) + for _data in mons: + mon_host = 
_data['name'] + mon_addr = _data['public_addr'].split(':')[0] + if mon_host: + hosts.add((mon_host, mon_addr)) + + # Add host from mds + file_systems = self._fs_maps.get('filesystems', []) + for _data in file_systems: + mds_info = _data.get('mdsmap').get('info') + for _gid in mds_info: + mds_data = mds_info[_gid] + mds_addr = mds_data.get('addr').split(':')[0] + mds_host = mds_data.get('name') + if mds_host: + hosts.add((mds_host, mds_addr)) + for tp in hosts: + host = tp[0] + self._host_nodes[host] = None + + host_node = NodeInfo( + label='VMHost', + domain_id='{}_{}'.format(self._cluster_id, host), + name=host, + meta={} + ) + self._host_nodes[host] = host_node + + def _init_mons(self): + cluster_id = self._cluster_id + mons = self._mon_maps.get('mons') + for mon in mons: + mon_name = mon.get('name', '') + mon_addr = mon.get('addr', '').split(':')[0] + if mon_name not in self._host_nodes.keys(): + continue + + dp_mon = MGRDpMon( + fsid=cluster_id, + host=mon_name, + ipaddr=mon_addr + ) + + # create mon node + mon_node = NodeInfo( + label='CephMon', + domain_id='{}.mon.{}'.format(cluster_id, mon_name), + name=mon_name, + meta=dp_mon.__dict__ + ) + self._mon_nodes[mon_name] = mon_node + + def _init_mds(self): + cluster_id = self._cluster_id + file_systems = self._fs_maps.get('filesystems', []) + for _data in file_systems: + mds_info = _data.get('mdsmap').get('info') + for _gid in mds_info: + mds_data = mds_info[_gid] + mds_addr = mds_data.get('addr').split(':')[0] + mds_host = mds_data.get('name') + mds_gid = mds_data.get('gid') + + if mds_host not in self._host_nodes: + continue + + dp_mds = MGRDpMds( + fsid=cluster_id, + host=mds_host, + ipaddr=mds_addr + ) + + # create osd node + mds_node = NodeInfo( + label='CephMds', + domain_id='{}.mds.{}'.format(cluster_id, mds_gid), + name='MDS.{}'.format(mds_gid), + meta=dp_mds.__dict__ + ) + self._mds_nodes[mds_host] = mds_node + + def _init_osds(self): + for osd in self._osd_maps.get('osds', []): + osd_id = osd.get('osd', -1) + meta = self._osd_metadata.get(str(osd_id), {}) + osd_host = meta['hostname'] + osd_ceph_version = meta['ceph_version'] + osd_rotational = meta['rotational'] + osd_devices = meta['devices'].split(',') + + # filter 'dm' device. 
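+ # ('dm-*' names are device-mapper views stacked on top of other block
+ # devices; presumably they are skipped here so the same physical disk
+ # is not reported twice under two names)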
+ devices = [] + for devname in osd_devices: + if 'dm' in devname: + continue + devices.append(devname) + + if osd_host not in self._host_nodes.keys(): + continue + self._osd_nodes[str(osd_id)] = None + public_addr = [] + cluster_addr = [] + for addr in osd.get('public_addrs', {}).get('addrvec', []): + public_addr.append(addr.get('addr')) + for addr in osd.get('cluster_addrs', {}).get('addrvec', []): + cluster_addr.append(addr.get('addr')) + dp_osd = MGRDpOsd( + fsid=self._cluster_id, + host=osd_host, + _id=osd_id, + uuid=osd.get('uuid'), + up=osd.get('up'), + _in=osd.get('in'), + weight=osd.get('weight'), + public_addr=','.join(public_addr), + cluster_addr=','.join(cluster_addr), + state=','.join(osd.get('state', [])), + ceph_release=osd_ceph_version, + osd_devices=','.join(devices), + rotational=osd_rotational) + for k, v in meta.items(): + setattr(dp_osd, k, v) + + # create osd node + osd_node = NodeInfo( + label='CephOsd', + domain_id='{}.osd.{}'.format(self._cluster_id, osd_id), + name='OSD.{}'.format(osd_id), + meta=dp_osd.__dict__ + ) + self._osd_nodes[str(osd_id)] = osd_node + + def _init_devices(self): + r = re.compile('[^/dev]\D+') + for osdid, o_val in self._osd_nodes.items(): + o_devs = o_val.meta.get('device_ids', '').split(',') + # fs_store + journal_devs = o_val.meta.get('backend_filestore_journal_dev_node', '').split(',') + # bs_store + bs_db_devs = o_val.meta.get('bluefs_db_dev_node', '').split(',') + bs_wal_devs = o_val.meta.get('bluefs_wal_dev_node', '').split(',') + + for dev in o_devs: + fs_journal = [] + bs_db = [] + bs_wal = [] + data = [] + if len(dev.split('=')) != 2: + continue + dev_name = dev.split('=')[0] + dev_id = dev.split('=')[1] + if not dev_id: + continue + + for j_dev in journal_devs: + if dev_name == ''.join(r.findall(j_dev)): + fs_journal.append(osdid) + for db_dev in bs_db_devs: + if dev_name == ''.join(r.findall(db_dev)): + bs_db.append(osdid) + for wal_dev in bs_wal_devs: + if dev_name == ''.join(r.findall(wal_dev)): + bs_wal.append(osdid) + + if not fs_journal and not bs_db and not bs_wal: + data.append(osdid) + + disk_domain_id = dev_id + if disk_domain_id not in self._dev_nodes.keys(): + dp_disk = MGRDpDisk( + host_domain_id='{}_{}'.format(self._cluster_id, o_val.meta.get('host')), + host=o_val.meta.get('host'), + osd_ids=osdid, + fs_journal_osd=','.join(str(x) for x in fs_journal) if fs_journal else '', + bs_db_osd=','.join(str(x) for x in bs_db) if bs_db else '', + bs_wal_osd=','.join(str(x) for x in bs_wal) if bs_wal else '', + data_osd=','.join(str(x) for x in data) if data else '' + ) + # create disk node + disk_node = NodeInfo( + label='VMDisk', + domain_id=disk_domain_id, + name=dev_name, + meta=dp_disk.__dict__ + ) + self._dev_nodes[disk_domain_id] = disk_node + else: + dev_node = self._dev_nodes[disk_domain_id] + osd_ids = dev_node.meta.get('osd_ids', '') + if osdid not in osd_ids.split(','): + arr_value = osd_ids.split(',') + arr_value.append(str(osdid)) + dev_node.meta['osd_ids'] = ','.join(arr_value) + if fs_journal: + arr_value = None + for t in fs_journal: + value = dev_node.meta.get('fs_journal_osd', '') + if value: + arr_value = value.split(',') + else: + arr_value = [] + if t not in arr_value: + arr_value.append(t) + if arr_value: + dev_node.meta['fs_journal_osd'] = ','.join(str(x) for x in arr_value) + if bs_db: + arr_value = None + for t in bs_db: + value = dev_node.meta.get('bs_db_osd', '') + if value: + arr_value = value.split(',') + else: + arr_value = [] + if t not in arr_value: + arr_value.append(t) + if arr_value: + 
dev_node.meta['bs_db_osd'] = ','.join(str(x) for x in arr_value) + if bs_wal: + arr_value = None + for t in bs_wal: + value = dev_node.meta.get('bs_wal_osd', '') + if value: + arr_value = value.split(',') + else: + arr_value = [] + if t not in arr_value: + arr_value.append(t) + if arr_value: + dev_node.meta['bs_wal_osd'] = ','.join(str(x) for x in arr_value) + if data: + arr_value = None + for t in data: + value = dev_node.meta.get('data_osd', '') + if value: + arr_value = value.split(',') + else: + arr_value = [] + if t not in arr_value: + arr_value.append(t) + if arr_value: + dev_node.meta['data_osd'] = ','.join(str(x) for x in arr_value) + + def _init_cluster_node(self): + cluster_id = self._ceph.get_cluster_id() + ceph_df_stat = self._ceph.get_ceph_df_state() + dp_cluster = MGRDpCeph( + fsid=cluster_id, + health=self._ceph.get_health_status(), + max_osd=len(self._ceph.get_osds()), + size=ceph_df_stat.get('total_size'), + avail_size=ceph_df_stat.get('avail_size'), + raw_used=ceph_df_stat.get('raw_used_size'), + raw_used_percent=ceph_df_stat.get('used_percent') + ) + cluster_name = cluster_id[-12:] + cluster_node = NodeInfo( + label='CephCluster', + domain_id=cluster_id, + name='cluster-{}'.format(cluster_name), + meta=dp_cluster.__dict__ + ) + self._cluster_id = cluster_id + self._cluster_node = cluster_node + + def _init_pools(self): + pools = self._osd_maps.get('pools', []) + cluster_id = self._cluster_id + for pool in pools: + osds = [] + pgs = self._ceph.get_pgs_up_by_poolid(int(pool.get('pool', -1))) + for pg_id, osd_id in pgs.items(): + for o_id in osd_id: + if o_id not in osds: + osds.append(str(o_id)) + dp_pool = MGRDpPool( + fsid=cluster_id, + size=pool.get('size'), + pool_name=pool.get('pool_name'), + pool_id=pool.get('pool'), + type=pool.get('type'), + min_size=pool.get('min_size'), + pg_num=pool.get('pg_num'), + pgp_num=pool.get('pg_placement_num'), + created_time=pool.get('create_time'), + pgids=','.join(pgs.keys()), + osd_ids=','.join(osds), + tiers=','.join(str(x) for x in pool.get('tiers', [])), + cache_mode=pool.get('cache_mode', ''), + erasure_code_profile=str(pool.get('erasure_code_profile', '')), + tier_of=str(pool.get('tier_of', -1))) + # create pool node + pool_node = NodeInfo( + label='CephPool', + domain_id='{}_pool_{}'.format(cluster_id, pool.get('pool')), + name=pool.get('pool_name'), + meta=dp_pool.__dict__ + ) + self._pool_nodes[str(pool.get('pool'))] = pool_node + + def _init_rbds(self): + cluster_id = self._cluster_id + for p_id, p_node in self._pool_nodes.items(): + rbds = self._ceph.get_rbd_list(p_node.name) + self._rbd_nodes[str(p_id)] = [] + for rbd in rbds: + dp_rbd = MGRDpRBD( + fsid=cluster_id, + _id=rbd['id'], + name=rbd['name'], + pool_name=rbd['pool_name'], + pool_id=p_id, + ) + # create rbd node + rbd_node = NodeInfo( + label='CephRBD', + domain_id='{}_rbd_{}'.format(cluster_id, rbd['id']), + name=rbd['name'], + meta=dp_rbd.__dict__, + ) + self._rbd_nodes[str(p_id)].append(rbd_node) + + def _init_fs(self): + # _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pool', 'mds_nodes'] + cluster_id = self._cluster_id + file_systems = self._fs_maps.get('filesystems', []) + for fs in file_systems: + mdsmap = fs.get('mdsmap', {}) + mds_hostnames = [] + for m, md in mdsmap.get('info', {}).items(): + if md.get('name') not in mds_hostnames: + mds_hostnames.append(md.get('name')) + dp_fs = MGRDpFS( + fsid=cluster_id, + _id=fs.get('id'), + name=mdsmap.get('fs_name'), + metadata_pool=str(mdsmap.get('metadata_pool', -1)), + data_pools=','.join(str(i) for 
i in mdsmap.get('data_pools', [])), + mds_nodes=','.join(mds_hostnames), + ) + fs_node = NodeInfo( + label='CephFS', + domain_id='{}_fs_{}'.format(cluster_id, fs.get('id')), + name=mdsmap.get('fs_name'), + meta=dp_fs.__dict__, + ) + self._fs_nodes[str(fs.get('id'))] = fs_node + + def _cluster_contains_host(self): + cluster_id = self._cluster_id + cluster_node = self._cluster_node + + # create node relation + for h_id, h_node in self._host_nodes.items(): + data = DBRelay() + # add osd node relationship + cypher_cmd = CypherOP.add_link( + cluster_node, + h_node, + 'CephClusterContainsHost' + ) + cluster_host = socket.gethostname() + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _host_contains_mon(self): + for m_name, m_node in self._mon_nodes.items(): + host_node = self._host_nodes.get(m_name) + if not host_node: + continue + data = DBRelay() + # add mon node relationship + cypher_cmd = CypherOP.add_link( + host_node, + m_node, + 'HostContainsMon' + ) + cluster_host = socket.gethostname() + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (self._cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _host_contains_osd(self): + cluster_id = self._cluster_id + for o_id, o_node in self._osd_nodes.items(): + host_node = self._host_nodes.get(o_node.meta.get('host')) + if not host_node: + continue + data = DBRelay() + # add osd node relationship + cypher_cmd = CypherOP.add_link( + host_node, + o_node, + 'HostContainsOsd' + ) + cluster_host = socket.gethostname() + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _host_contains_mds(self): + cluster_id = self._cluster_id + for m_name, mds_node in self._mds_nodes.items(): + data = DBRelay() + host_node = self._host_nodes.get(mds_node.meta.get('host')) + if not host_node: + continue + # add osd node relationship + cypher_cmd = CypherOP.add_link( + host_node, + mds_node, + 'HostContainsMds' + ) + cluster_host = socket.gethostname() + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _osd_contains_disk(self): + cluster_id = self._cluster_id + cluster_host = socket.gethostname() + for d_name, d_node in self._dev_nodes.items(): + keys = {'data_osd': 'DataDiskOfOSD', + 'fs_journal_osd': 'FsJounalDiskOfOSD', + 'bs_db_osd': 'BsDBDiskOfOSD', + 'bs_wal_osd': 'BsWalDiskOfOSD'} + for k, v in keys.items(): + if not d_node.meta.get(k): + continue + for osdid in d_node.meta.get(k, '').split(','): + data = DBRelay() + osd_node = self._osd_nodes.get(str(osdid)) + if not osd_node: + continue + # add disk node relationship + cypher_cmd = CypherOP.add_link( + osd_node, + d_node, + v) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + hostname = d_node.meta.get('host', '') + if not 
hostname: + continue + host_node = self._host_nodes.get(hostname) + if not host_node: + continue + # add osd node relationship + data = DBRelay() + cypher_cmd = CypherOP.add_link( + host_node, + d_node, + 'VmHostContainsVmDisk' + ) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _pool_contains_osd(self): + cluster_id = self._cluster_id + cluster_host = socket.gethostname() + for p_id, p_node in self._pool_nodes.items(): + for o_id in p_node.meta.get('osd_ids', '').split(','): + osd_node = self._osd_nodes.get(str(o_id)) + if not osd_node: + continue + data = DBRelay() + cypher_cmd = CypherOP.add_link( + osd_node, + p_node, + 'OsdContainsPool' + ) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _pool_contains_rbd(self): + cluster_id = self._cluster_id + cluster_host = socket.gethostname() + for p_id, p_node in self._pool_nodes.items(): + for rbd_node in self._rbd_nodes.get(str(p_id), []): + if not rbd_node: + continue + data = DBRelay() + cypher_cmd = CypherOP.add_link( + p_node, + rbd_node, + 'PoolContainsRBD' + ) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _pool_contains_fs(self): + cluster_id = self._cluster_id + cluster_host = socket.gethostname() + for fs_id, fs_node in self._fs_nodes.items(): + pool_attrs = ['metadata_pool', 'data_pools'] + for p_attr in pool_attrs: + pools_id = fs_node.meta.get(p_attr).split(',') + for p_id in pools_id: + p_node = self._pool_nodes.get(str(p_id)) + if p_node: + data = DBRelay() + cypher_cmd = CypherOP.add_link( + p_node, + fs_node, + 'MetadataPoolContainsFS' if p_attr == 'metadata_pool' else 'DataPoolContainsFS' + ) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + for mds_name in fs_node.meta.get('mds_nodes', '').split(','): + mds_node = self._mds_nodes.get(mds_name) + if not mds_node: + continue + data = DBRelay() + cypher_cmd = CypherOP.add_link( + mds_node, + fs_node, + 'MDSContainsFS' + ) + data.fields['agenthost'] = cluster_host + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['host'] = cluster_host + data.fields['cmd'] = str(cypher_cmd) + self.data.append(data) + + def _collect_data(self): + if not self._module_inst: + return + job_name = ['cluster_contains_host', 'host_contains_mon', 'host_contains_mds', 'host_contains_osd', 'osd_contains_disk', + 'pool_contains_osd', 'pool_contains_rbd', 'pool_contains_fs'] + for job in job_name: + fn = getattr(self, '_%s' % job) + if not fn: + continue + try: + fn() + except Exception as e: + self._module_inst.log.error('dbrelay - execute function {} fail, due to {}'.format(job, str(e))) + continue diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py new file mode 100644 index 00000000000..63c8e870f71 
--- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py @@ -0,0 +1,71 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +from __future__ import absolute_import + +import socket +import time + +from . import AGENT_VERSION, MetricsAgent, MetricsField +from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING +from ...common.clusterdata import ClusterAPI + + +class SAIAgentFields(MetricsField): + """ SAI DiskSmart structure """ + measurement = 'sai_agent' + + def __init__(self): + super(SAIAgentFields, self).__init__() + self.tags['agenthost_domain_id'] = None + self.fields['agent_type'] = str('ceph') + self.fields['agent_version'] = str(AGENT_VERSION) + self.fields['agenthost'] = '' + self.fields['cluster_domain_id'] = '' + self.fields['heartbeat_interval'] = '' + self.fields['host_ip'] = '' + self.fields['host_name'] = '' + self.fields['is_error'] = False + self.fields['is_ceph_error'] = False + self.fields['needs_warning'] = False + self.fields['send'] = None + + +class SAIAgent(MetricsAgent): + measurement = 'sai_agent' + + def _collect_data(self): + mgr_id = [] + c_data = SAIAgentFields() + obj_api = ClusterAPI(self._module_inst) + svc_data = obj_api.get_server(socket.gethostname()) + cluster_state = obj_api.get_health_status() + if not svc_data: + raise Exception('unable to get %s service info' % socket.gethostname()) + # Filter mgr id + for s in svc_data.get('services', []): + if s.get('type', '') == 'mgr': + mgr_id.append(s.get('id')) + + for _id in mgr_id: + mgr_meta = obj_api.get_mgr_metadata(_id) + cluster_id = obj_api.get_cluster_id() + c_data.fields['cluster_domain_id'] = str(cluster_id) + c_data.fields['agenthost'] = str(socket.gethostname()) + c_data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, c_data.fields['agenthost'])) + c_data.fields['heartbeat_interval'] = \ + int(obj_api.get_configuration('diskprediction_upload_metrics_interval')) + c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1')) + c_data.fields['host_name'] = str(socket.gethostname()) + if obj_api.module.status.get('status', '') in [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED]: + c_data.fields['is_error'] = bool(True) + else: + c_data.fields['is_error'] = bool(False) + if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']: + c_data.fields['is_ceph_error'] = bool(True) + c_data.fields['needs_warning'] = bool(True) + c_data.fields['is_error'] = bool(True) + c_data.fields['problems'] = str(obj_api.get_health_checks()) + else: + c_data.fields['is_ceph_error'] = bool(False) + c_data.fields['send'] = int(time.time() * 1000) + self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py new file mode 100644 index 00000000000..f1f23beec52 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py @@ -0,0 +1,36 @@ +from __future__ import absolute_import + +import socket + +from . 
import AGENT_VERSION, MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class SAIClusterFields(MetricsField): + """ SAI Host structure """ + measurement = 'sai_cluster' + + def __init__(self): + super(SAIClusterFields, self).__init__() + self.tags['domain_id'] = None + self.fields['agenthost'] = None + self.fields['agenthost_domain_id'] = None + self.fields['name'] = None + self.fields['agent_version'] = str(AGENT_VERSION) + + +class SAICluserAgent(MetricsAgent): + measurement = 'sai_cluster' + + def _collect_data(self): + c_data = SAIClusterFields() + obj_api = ClusterAPI(self._module_inst) + cluster_id = obj_api.get_cluster_id() + + c_data.tags['domain_id'] = str(cluster_id) + c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname())) + c_data.fields['agenthost'] = str(socket.gethostname()) + c_data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, c_data.fields['agenthost'])) + c_data.fields['name'] = 'Ceph mgr plugin' + self.data.append(c_data) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py new file mode 100644 index 00000000000..1f3e1e54d99 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py @@ -0,0 +1,176 @@ +from __future__ import absolute_import + +import socket + +from . import AGENT_VERSION, MetricsAgent, MetricsField +from ...common import get_human_readable +from ...common.clusterdata import ClusterAPI + + +class SAIDiskFields(MetricsField): + """ SAI Disk structure """ + measurement = 'sai_disk' + + def __init__(self): + super(SAIDiskFields, self).__init__() + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['disk_domain_id'] = None + self.tags['disk_name'] = None + self.tags['disk_wwn'] = None + self.tags['primary_key'] = None + self.fields['cluster_domain_id'] = None + self.fields['host_domain_id'] = None + self.fields['model'] = None + self.fields['serial_number'] = None + self.fields['size'] = None + self.fields['vendor'] = None + self.fields['agent_version'] = str(AGENT_VERSION) + + """disk_status + 0: unknown 1: good 2: failure + """ + self.fields['disk_status'] = 0 + + """disk_type + 0: unknown 1: HDD 2: SSD 3: SSD NVME + 4: SSD SAS 5: SSD SATA 6: HDD SAS 7: HDD SATA + """ + self.fields['disk_type'] = 0 + + +class SAIDiskAgent(MetricsAgent): + measurement = 'sai_disk' + + @staticmethod + def _convert_disk_type(is_ssd, sata_version, protocol): + """ return type: + 0: "Unknown', 1: 'HDD', + 2: 'SSD", 3: "SSD NVME", + 4: "SSD SAS", 5: "SSD SATA", + 6: "HDD SAS", 7: "HDD SATA" + """ + if is_ssd: + if sata_version and not protocol: + disk_type = 5 + elif 'SCSI'.lower() in protocol.lower(): + disk_type = 4 + elif 'NVMe'.lower() in protocol.lower(): + disk_type = 3 + else: + disk_type = 2 + else: + if sata_version and not protocol: + disk_type = 7 + elif 'SCSI'.lower() in protocol.lower(): + disk_type = 6 + else: + disk_type = 1 + return disk_type + + def _collect_data(self): + # process data and save to 'self.data' + obj_api = ClusterAPI(self._module_inst) + cluster_id = obj_api.get_cluster_id() + osds = obj_api.get_osds() + for osd in osds: + if osd.get('osd') is None: + continue + if not osd.get('in'): + continue + osds_meta = obj_api.get_osd_metadata(osd.get('osd')) + if not osds_meta: + continue + osds_smart = obj_api.get_osd_smart(osd.get('osd')) + if not osds_smart: + continue + for dev_name, s_val in osds_smart.items(): + d_data = SAIDiskFields() 
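+ # The branches below derive a stable identity for each disk:
+ # disk_wwn prefers the WWN reported by SMART (naa/t10/eui/iqn
+ # forms, else OUI+id), then the serial number, then the bare
+ # device name; serial_number falls back in the same order.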
+ d_data.tags['disk_name'] = str(dev_name) + d_data.fields['cluster_domain_id'] = str(cluster_id) + d_data.tags['host_domain_id'] = \ + str('%s_%s' + % (cluster_id, osds_meta.get('hostname', 'None'))) + d_data.fields['agenthost'] = str(socket.gethostname()) + d_data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, d_data.fields['agenthost'])) + serial_number = s_val.get('serial_number') + wwn = s_val.get('wwn', {}) + wwpn = '' + if wwn: + wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0)) + for k in wwn.keys(): + if k in ['naa', 't10', 'eui', 'iqn']: + wwpn = ('%X%s' % (wwn[k], wwpn)).lower() + break + + if wwpn: + d_data.tags['disk_domain_id'] = str(dev_name) + d_data.tags['disk_wwn'] = str(wwpn) + if serial_number: + d_data.fields['serial_number'] = str(serial_number) + else: + d_data.fields['serial_number'] = str(wwpn) + elif serial_number: + d_data.tags['disk_domain_id'] = str(dev_name) + d_data.fields['serial_number'] = str(serial_number) + if wwpn: + d_data.tags['disk_wwn'] = str(wwpn) + else: + d_data.tags['disk_wwn'] = str(serial_number) + else: + d_data.tags['disk_domain_id'] = str(dev_name) + d_data.tags['disk_wwn'] = str(dev_name) + d_data.fields['serial_number'] = str(dev_name) + d_data.tags['primary_key'] = \ + str('%s%s%s' + % (cluster_id, d_data.tags['host_domain_id'], + d_data.tags['disk_domain_id'])) + d_data.fields['disk_status'] = int(1) + is_ssd = True if s_val.get('rotation_rate') == 0 else False + vendor = s_val.get('vendor', None) + model = s_val.get('model_name', None) + if s_val.get('sata_version', {}).get('string'): + sata_version = s_val['sata_version']['string'] + else: + sata_version = '' + if s_val.get('device', {}).get('protocol'): + protocol = s_val['device']['protocol'] + else: + protocol = '' + d_data.fields['disk_type'] = \ + self._convert_disk_type(is_ssd, sata_version, protocol) + d_data.fields['firmware_version'] = \ + str(s_val.get('firmware_version')) + if model: + d_data.fields['model'] = str(model) + if vendor: + d_data.fields['vendor'] = str(vendor) + if sata_version: + d_data.fields['sata_version'] = str(sata_version) + if s_val.get('logical_block_size'): + d_data.fields['sector_size'] = \ + str(str(s_val['logical_block_size'])) + d_data.fields['transport_protocol'] = str('') + d_data.fields['vendor'] = \ + str(s_val.get('model_family', '')).replace('\"', '\'') + try: + if isinstance(s_val.get('user_capacity'), dict): + if isinstance(s_val['user_capacity'].get('bytes'), dict): + user_capacity = \ + s_val['user_capacity'].get('bytes', {}).get('n', 0) + else: + user_capacity = s_val['user_capacity'].get('bytes') + else: + user_capacity = s_val.get('user_capacity', 0) + except ValueError: + user_capacity = 0 + if str(user_capacity).isdigit(): + d_data.fields['size'] = get_human_readable(int(user_capacity), 0) + else: + d_data.fields['size'] = str(user_capacity) + if s_val.get('smart_status', {}).get('passed'): + d_data.fields['smart_health_status'] = 'PASSED' + else: + d_data.fields['smart_health_status'] = 'FAILED' + self.data.append(d_data) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py new file mode 100644 index 00000000000..0f03a82526b --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py @@ -0,0 +1,156 @@ +from __future__ import absolute_import + +import datetime +import json +import _strptime +import socket +import time + +from . 
import AGENT_VERSION, MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class SAIDiskSmartFields(MetricsField): + """ SAI DiskSmart structure """ + measurement = 'sai_disk_smart' + + def __init__(self): + super(SAIDiskSmartFields, self).__init__() + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.tags['disk_domain_id'] = None + self.tags['disk_name'] = None + self.tags['disk_wwn'] = None + self.tags['primary_key'] = None + self.fields['cluster_domain_id'] = None + self.fields['host_domain_id'] = None + self.fields['agent_version'] = str(AGENT_VERSION) + + +class SAIDiskSmartAgent(MetricsAgent): + measurement = 'sai_disk_smart' + + def _collect_data(self): + # process data and save to 'self.data' + obj_api = ClusterAPI(self._module_inst) + cluster_id = obj_api.get_cluster_id() + osds = obj_api.get_osds() + for osd in osds: + if osd.get('osd') is None: + continue + if not osd.get('in'): + continue + osds_meta = obj_api.get_osd_metadata(osd.get('osd')) + if not osds_meta: + continue + devs_info = obj_api.get_osd_device_id(osd.get('osd')) + if devs_info: + for dev_name, dev_info in devs_info.items(): + osds_smart = obj_api.get_device_health(dev_info['dev_id']) + if not osds_smart: + continue + # Always pass through last smart data record + o_key = sorted(osds_smart.keys(), reverse=True)[0] + if o_key: + s_date = o_key + s_val = osds_smart[s_date] + smart_data = SAIDiskSmartFields() + smart_data.tags['disk_name'] = str(dev_name) + smart_data.fields['cluster_domain_id'] = str(cluster_id) + smart_data.tags['host_domain_id'] = \ + str('%s_%s' + % (cluster_id, osds_meta.get('hostname', 'None'))) + smart_data.fields['agenthost'] = str(socket.gethostname()) + smart_data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, smart_data.fields['agenthost'])) + # parse attributes + ata_smart = s_val.get('ata_smart_attributes', {}) + for attr in ata_smart.get('table', []): + if attr.get('raw', {}).get('string'): + if str(attr.get('raw', {}).get('string', '0')).isdigit(): + smart_data.fields['%s_raw' % attr.get('id')] = \ + int(attr.get('raw', {}).get('string', '0')) + else: + if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit(): + smart_data.fields['%s_raw' % attr.get('id')] = \ + int(attr.get('raw', {}).get('string', '0').split(' ')[0]) + else: + smart_data.fields['%s_raw' % attr.get('id')] = \ + attr.get('raw', {}).get('value', 0) + smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'")) + if s_val.get('temperature', {}).get('current') is not None: + smart_data.fields['CurrentDriveTemperature_raw'] = \ + int(s_val['temperature']['current']) + if s_val.get('temperature', {}).get('drive_trip') is not None: + smart_data.fields['DriveTripTemperature_raw'] = \ + int(s_val['temperature']['drive_trip']) + if s_val.get('elements_grown_list') is not None: + smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list']) + if s_val.get('power_on_time', {}).get('hours') is not None: + smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours']) + if s_val.get('scsi_percentage_used_endurance_indicator') is not None: + smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \ + int(s_val['scsi_percentage_used_endurance_indicator']) + if s_val.get('scsi_error_counter_log') is not None: + s_err_counter = s_val['scsi_error_counter_log'] + for s_key in s_err_counter.keys(): + if s_key.lower() in ['read', 'write']: + for s1_key in s_err_counter[s_key].keys(): + if 
s1_key.lower() == 'errors_corrected_by_eccfast': + smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['errors_corrected_by_eccfast']) + elif s1_key.lower() == 'errors_corrected_by_eccdelayed': + smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['errors_corrected_by_eccdelayed']) + elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites': + smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites']) + elif s1_key.lower() == 'total_errors_corrected': + smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['total_errors_corrected']) + elif s1_key.lower() == 'correction_algorithm_invocations': + smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['correction_algorithm_invocations']) + elif s1_key.lower() == 'gigabytes_processed': + smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \ + float(s_err_counter[s_key]['gigabytes_processed']) + elif s1_key.lower() == 'total_uncorrected_errors': + smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \ + int(s_err_counter[s_key]['total_uncorrected_errors']) + + serial_number = s_val.get('serial_number') + wwn = s_val.get('wwn', {}) + wwpn = '' + if wwn: + wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0)) + for k in wwn.keys(): + if k in ['naa', 't10', 'eui', 'iqn']: + wwpn = ('%X%s' % (wwn[k], wwpn)).lower() + break + if wwpn: + smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) + smart_data.tags['disk_wwn'] = str(wwpn) + if serial_number: + smart_data.fields['serial_number'] = str(serial_number) + else: + smart_data.fields['serial_number'] = str(wwpn) + elif serial_number: + smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) + smart_data.fields['serial_number'] = str(serial_number) + if wwpn: + smart_data.tags['disk_wwn'] = str(wwpn) + else: + smart_data.tags['disk_wwn'] = str(serial_number) + else: + smart_data.tags['disk_domain_id'] = str(dev_info['dev_id']) + smart_data.tags['disk_wwn'] = str(dev_name) + smart_data.fields['serial_number'] = str(dev_name) + smart_data.tags['primary_key'] = \ + str('%s%s%s' + % (cluster_id, + smart_data.tags['host_domain_id'], + smart_data.tags['disk_domain_id'])) + smart_data.timestamp = \ + time.mktime(datetime.datetime.strptime( + s_date, '%Y%m%d-%H%M%S').timetuple()) + self.data.append(smart_data) diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py new file mode 100644 index 00000000000..ec1e4cb6a9b --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py @@ -0,0 +1,108 @@ +from __future__ import absolute_import + +import socket + +from . 
import AGENT_VERSION, MetricsAgent, MetricsField +from ...common.clusterdata import ClusterAPI + + +class SAIHostFields(MetricsField): + """ SAI Host structure """ + measurement = 'sai_host' + + def __init__(self): + super(SAIHostFields, self).__init__() + self.tags['domain_id'] = None + self.fields['agenthost'] = None + self.tags['agenthost_domain_id'] = None + self.fields['cluster_domain_id'] = None + self.fields['name'] = None + self.fields['host_ip'] = None + self.fields['host_ipv6'] = None + self.fields['host_uuid'] = None + self.fields['os_type'] = str('ceph') + self.fields['os_name'] = None + self.fields['os_version'] = None + self.fields['agent_version'] = str(AGENT_VERSION) + + +class SAIHostAgent(MetricsAgent): + measurement = 'sai_host' + + def _collect_data(self): + db = ClusterAPI(self._module_inst) + cluster_id = db.get_cluster_id() + + hosts = set() + + # Parse osd's host + osd_data = db.get_osds() + for _data in osd_data: + osd_id = _data['osd'] + if not _data.get('in'): + continue + osd_addr = _data['public_addr'].split(':')[0] + osd_metadata = db.get_osd_metadata(osd_id) + if osd_metadata: + osd_host = osd_metadata.get('hostname', 'None') + if osd_host not in hosts: + data = SAIHostFields() + data.fields['agenthost'] = str(socket.gethostname()) + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['domain_id'] = \ + str('%s_%s' % (cluster_id, osd_host)) + data.fields['cluster_domain_id'] = str(cluster_id) + data.fields['host_ip'] = osd_addr + data.fields['host_uuid'] = \ + str('%s_%s' % (cluster_id, osd_host)) + data.fields['os_name'] = \ + osd_metadata.get('ceph_release', '') + data.fields['os_version'] = \ + osd_metadata.get('ceph_version_short', '') + data.fields['name'] = 'osd_{}'.format(osd_host) + hosts.add(osd_host) + self.data.append(data) + + # Parse mon node host + mons = db.get_mons() + for _data in mons: + mon_host = _data['name'] + mon_addr = _data['public_addr'].split(':')[0] + if mon_host not in hosts: + data = SAIHostFields() + data.fields['agenthost'] = str(socket.gethostname()) + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['domain_id'] = \ + str('%s_%s' % (cluster_id, mon_host)) + data.fields['cluster_domain_id'] = str(cluster_id) + data.fields['host_ip'] = mon_addr + data.fields['host_uuid'] = \ + str('%s_%s' % (cluster_id, mon_host)) + data.fields['name'] = 'mon_{}'.format(mon_host) + hosts.add(mon_host) + self.data.append(data) + + # Parse fs host + file_systems = db.get_file_systems() + for _data in file_systems: + mds_info = _data.get('mdsmap').get('info') + for _gid in mds_info: + mds_data = mds_info[_gid] + mds_addr = mds_data.get('addr').split(':')[0] + mds_host = mds_data.get('name') + if mds_host not in hosts: + data = SAIHostFields() + data.fields['agenthost'] = str(socket.gethostname()) + data.tags['agenthost_domain_id'] = \ + str('%s_%s' % (cluster_id, data.fields['agenthost'])) + data.tags['domain_id'] = \ + str('%s_%s' % (cluster_id, mds_host)) + data.fields['cluster_domain_id'] = str(cluster_id) + data.fields['host_ip'] = mds_addr + data.fields['host_uuid'] = \ + str('%s_%s' % (cluster_id, mds_host)) + data.fields['name'] = 'mds_{}'.format(mds_host) + hosts.add(mds_host) + self.data.append(data) diff --git a/src/pybind/mgr/diskprediction_cloud/common/__init__.py b/src/pybind/mgr/diskprediction_cloud/common/__init__.py new file mode 100644 index 00000000000..cbc3c30ebf6 --- /dev/null +++ 
b/src/pybind/mgr/diskprediction_cloud/common/__init__.py @@ -0,0 +1,59 @@ +from __future__ import absolute_import +import errno +from functools import wraps +from six.moves.http_client import BAD_REQUEST +import os +import signal + + +DP_MGR_STAT_OK = 'OK' +DP_MGR_STAT_WARNING = 'WARNING' +DP_MGR_STAT_FAILED = 'FAILED' +DP_MGR_STAT_DISABLED = 'DISABLED' +DP_MGR_STAT_ENABLED = 'ENABLED' + + +class DummyResonse: + def __init__(self): + self.resp_json = dict() + self.content = 'DummyResponse' + self.status_code = BAD_REQUEST + + def json(self): + return self.resp_json + + +class TimeoutError(Exception): + pass + + +def timeout(seconds=10, error_message=os.strerror(errno.ETIME)): + def decorator(func): + def _handle_timeout(signum, frame): + raise TimeoutError(error_message) + + def wrapper(*args, **kwargs): + if hasattr(args[0], '_timeout') is not None: + seconds = args[0]._timeout + signal.signal(signal.SIGALRM, _handle_timeout) + signal.alarm(seconds) + try: + result = func(*args, **kwargs) + finally: + signal.alarm(0) + return result + + return wraps(func)(wrapper) + + return decorator + + +def get_human_readable(size, precision=2): + suffixes = ['B', 'KB', 'MB', 'GB', 'TB'] + suffix_index = 0 + while size > 1000 and suffix_index < 4: + # increment the index of the suffix + suffix_index += 1 + # apply the division + size = size/1000.0 + return '%.*d %s' % (precision, size, suffixes[suffix_index]) diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py new file mode 100644 index 00000000000..9f65c731a84 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py @@ -0,0 +1,1775 @@ +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: mainServer.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +from google.protobuf import descriptor_pb2 +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='mainServer.proto', + package='proto', + syntax='proto3', + serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 
\x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 \x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" 
\x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 .proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a .proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3') + , + dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,]) + + + +_PERSON_PHONETYPE = _descriptor.EnumDescriptor( + name='PhoneType', + full_name='proto.Person.PhoneType', + filename=None, + file=DESCRIPTOR, + values=[ + _descriptor.EnumValueDescriptor( + name='MOBILE', index=0, number=0, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='HOME', index=1, number=1, + options=None, + type=None), + _descriptor.EnumValueDescriptor( + name='WORK', index=2, number=2, + options=None, + type=None), + ], + containing_type=None, + options=None, + serialized_start=579, + serialized_end=622, +) +_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE) + + +_EMPTY = _descriptor.Descriptor( + name='Empty', + full_name='proto.Empty', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=57, + serialized_end=64, +) + + +_GENERALMSGOUTPUT = _descriptor.Descriptor( + name='GeneralMsgOutput', + full_name='proto.GeneralMsgOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', 
full_name='proto.GeneralMsgOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=66, + serialized_end=101, +) + + +_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor( + name='GeneralHeartbeatOutput', + full_name='proto.GeneralHeartbeatOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=103, + serialized_end=144, +) + + +_PINGOUTOUT = _descriptor.Descriptor( + name='PingOutout', + full_name='proto.PingOutout', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.PingOutout.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=146, + serialized_end=175, +) + + +_TESTINPUT = _descriptor.Descriptor( + name='TestInput', + full_name='proto.TestInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='people', full_name='proto.TestInput.people', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=177, + serialized_end=219, +) + + +_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor( + name='MapValueEntry', + full_name='proto.TestOutput.MapValueEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, 
file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')), + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=365, + serialized_end=412, +) + +_TESTOUTPUT = _descriptor.Descriptor( + name='TestOutput', + full_name='proto.TestOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='strArray', full_name='proto.TestOutput.strArray', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='mapValue', full_name='proto.TestOutput.mapValue', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='pn', full_name='proto.TestOutput.pn', index=2, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='profile', full_name='proto.TestOutput.profile', index=3, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=222, + serialized_end=412, +) + + +_PERSON_PHONENUMBER = _descriptor.Descriptor( + name='PhoneNumber', + full_name='proto.Person.PhoneNumber', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='number', full_name='proto.Person.PhoneNumber.number', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='type', full_name='proto.Person.PhoneNumber.type', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=509, + serialized_end=577, +) + +_PERSON = _descriptor.Descriptor( + name='Person', + full_name='proto.Person', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='proto.Person.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + 
name='id', full_name='proto.Person.id', index=1, + number=2, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='email', full_name='proto.Person.email', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='phones', full_name='proto.Person.phones', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_PERSON_PHONENUMBER, ], + enum_types=[ + _PERSON_PHONETYPE, + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=415, + serialized_end=622, +) + + +_PROFILE_FILE = _descriptor.Descriptor( + name='File', + full_name='proto.Profile.File', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='proto.Profile.File.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3, + number=4, type=2, cpp_type=6, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4, + number=5, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5, + number=6, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=675, + serialized_end=794, +) + +_PROFILE = 
_descriptor.Descriptor( + name='Profile', + full_name='proto.Profile', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='fileInfo', full_name='proto.Profile.fileInfo', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[_PROFILE_FILE, ], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=625, + serialized_end=794, +) + + +_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor( + name='GetUsersByStatusInput', + full_name='proto.GetUsersByStatusInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='status', full_name='proto.GetUsersByStatusInput.status', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='key', full_name='proto.GetUsersByStatusInput.key', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=796, + serialized_end=848, +) + + +_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor( + name='GetUsersByStatusOutput', + full_name='proto.GetUsersByStatusOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='users', full_name='proto.GetUsersByStatusOutput.users', index=0, + number=1, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=850, + serialized_end=908, +) + + +_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor( + name='AccountHeartbeatOutput', + full_name='proto.AccountHeartbeatOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.AccountHeartbeatOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=910, + serialized_end=951, +) + + +_LOGININPUT = _descriptor.Descriptor( + name='LoginInput', + full_name='proto.LoginInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.LoginInput.email', index=0, 
+ number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='password', full_name='proto.LoginInput.password', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=953, + serialized_end=998, +) + + +_USEROUTPUT = _descriptor.Descriptor( + name='UserOutput', + full_name='proto.UserOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='id', full_name='proto.UserOutput.id', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='email', full_name='proto.UserOutput.email', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='status', full_name='proto.UserOutput.status', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='phone', full_name='proto.UserOutput.phone', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='firstName', full_name='proto.UserOutput.firstName', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='lastName', full_name='proto.UserOutput.lastName', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='createdTime', full_name='proto.UserOutput.createdTime', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='namespace', full_name='proto.UserOutput.namespace', index=7, + number=8, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, 
enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='domainName', full_name='proto.UserOutput.domainName', index=8, + number=9, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='company', full_name='proto.UserOutput.company', index=9, + number=10, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='url', full_name='proto.UserOutput.url', index=10, + number=11, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11, + number=12, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12, + number=13, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1001, + serialized_end=1243, +) + + +_SINGUPINPUT = _descriptor.Descriptor( + name='SingupInput', + full_name='proto.SingupInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.SingupInput.email', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='phone', full_name='proto.SingupInput.phone', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='firstName', full_name='proto.SingupInput.firstName', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='lastName', full_name='proto.SingupInput.lastName', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='password', full_name='proto.SingupInput.password', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='company', full_name='proto.SingupInput.company', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1245, + serialized_end=1360, +) + + +_SINGUPOUTPUT = _descriptor.Descriptor( + name='SingupOutput', + full_name='proto.SingupOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.SingupOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1362, + serialized_end=1393, +) + + +_DELETEUSERINPUT = _descriptor.Descriptor( + name='DeleteUserInput', + full_name='proto.DeleteUserInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.DeleteUserInput.email', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='key', full_name='proto.DeleteUserInput.key', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1395, + serialized_end=1440, +) + + +_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor( + name='UpdateUserStatusInput', + full_name='proto.UpdateUserStatusInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.UpdateUserStatusInput.email', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='key', full_name='proto.UpdateUserStatusInput.key', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, 
extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='status', full_name='proto.UpdateUserStatusInput.status', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1442, + serialized_end=1509, +) + + +_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor( + name='ResendConfirmCodeInput', + full_name='proto.ResendConfirmCodeInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.ResendConfirmCodeInput.email', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1511, + serialized_end=1550, +) + + +_CONFIRMINPUT = _descriptor.Descriptor( + name='ConfirmInput', + full_name='proto.ConfirmInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='email', full_name='proto.ConfirmInput.email', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='code', full_name='proto.ConfirmInput.code', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1552, + serialized_end=1595, +) + + +_DPHEARTBEATOUTPUT = _descriptor.Descriptor( + name='DPHeartbeatOutput', + full_name='proto.DPHeartbeatOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.DPHeartbeatOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1597, + serialized_end=1633, +) + + +_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor( + name='DPGetPhysicalDisksInput', + full_name='proto.DPGetPhysicalDisksInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0, + number=1, type=9, cpp_type=9, label=1, + 
has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1635, + serialized_end=1745, +) + + +_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor( + name='DPGetDisksPredictionInput', + full_name='proto.DPGetDisksPredictionInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2, + number=3, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3, + number=4, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1747, + serialized_end=1870, +) + + +_DPBINARYOUTPUT = _descriptor.Descriptor( + name='DPBinaryOutput', + full_name='proto.DPBinaryOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='data', full_name='proto.DPBinaryOutput.data', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=_b(""), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1872, + serialized_end=1902, +) + + +_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor( + name='CollectionHeartbeatOutput', + full_name='proto.CollectionHeartbeatOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1904, + serialized_end=1948, +) + + +_POSTMETRICSINPUT = _descriptor.Descriptor( + name='PostMetricsInput', + full_name='proto.PostMetricsInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='points', full_name='proto.PostMetricsInput.points', index=0, + number=1, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1950, + serialized_end=1984, +) + + +_POSTDBRELAYINPUT = _descriptor.Descriptor( + name='PostDBRelayInput', + full_name='proto.PostDBRelayInput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0, + number=1, type=9, 
cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1986, + serialized_end=2018, +) + + +_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor( + name='CollectionMessageOutput', + full_name='proto.CollectionMessageOutput', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='status', full_name='proto.CollectionMessageOutput.status', index=0, + number=1, type=3, cpp_type=2, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + _descriptor.FieldDescriptor( + name='message', full_name='proto.CollectionMessageOutput.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=2020, + serialized_end=2078, +) + +_TESTINPUT.fields_by_name['people'].message_type = _PERSON +_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT +_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY +_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON +_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE +_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE +_PERSON_PHONENUMBER.containing_type = _PERSON +_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER +_PERSON_PHONETYPE.containing_type = _PERSON +_PROFILE_FILE.containing_type = _PROFILE +_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE +_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT +DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY +DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT +DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT +DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT +DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT +DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT +DESCRIPTOR.message_types_by_name['Person'] = _PERSON +DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE +DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT +DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT +DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT +DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT +DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT +DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT +DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT +DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT +DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT +DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT 
+DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT +DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT +DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT +DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT +DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT +DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT +DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT +DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT +DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict( + DESCRIPTOR = _EMPTY, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.Empty) + )) +_sym_db.RegisterMessage(Empty) + +GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict( + DESCRIPTOR = _GENERALMSGOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput) + )) +_sym_db.RegisterMessage(GeneralMsgOutput) + +GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict( + DESCRIPTOR = _GENERALHEARTBEATOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput) + )) +_sym_db.RegisterMessage(GeneralHeartbeatOutput) + +PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict( + DESCRIPTOR = _PINGOUTOUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.PingOutout) + )) +_sym_db.RegisterMessage(PingOutout) + +TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict( + DESCRIPTOR = _TESTINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.TestInput) + )) +_sym_db.RegisterMessage(TestInput) + +TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict( + + MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict( + DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry) + )) + , + DESCRIPTOR = _TESTOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.TestOutput) + )) +_sym_db.RegisterMessage(TestOutput) +_sym_db.RegisterMessage(TestOutput.MapValueEntry) + +Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict( + + PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict( + DESCRIPTOR = _PERSON_PHONENUMBER, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber) + )) + , + DESCRIPTOR = _PERSON, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.Person) + )) +_sym_db.RegisterMessage(Person) +_sym_db.RegisterMessage(Person.PhoneNumber) + +Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict( + + File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict( + DESCRIPTOR = _PROFILE_FILE, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.Profile.File) + )) + , + DESCRIPTOR = _PROFILE, 
+ __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.Profile) + )) +_sym_db.RegisterMessage(Profile) +_sym_db.RegisterMessage(Profile.File) + +GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict( + DESCRIPTOR = _GETUSERSBYSTATUSINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput) + )) +_sym_db.RegisterMessage(GetUsersByStatusInput) + +GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict( + DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput) + )) +_sym_db.RegisterMessage(GetUsersByStatusOutput) + +AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict( + DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput) + )) +_sym_db.RegisterMessage(AccountHeartbeatOutput) + +LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict( + DESCRIPTOR = _LOGININPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.LoginInput) + )) +_sym_db.RegisterMessage(LoginInput) + +UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict( + DESCRIPTOR = _USEROUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.UserOutput) + )) +_sym_db.RegisterMessage(UserOutput) + +SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict( + DESCRIPTOR = _SINGUPINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.SingupInput) + )) +_sym_db.RegisterMessage(SingupInput) + +SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict( + DESCRIPTOR = _SINGUPOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.SingupOutput) + )) +_sym_db.RegisterMessage(SingupOutput) + +DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict( + DESCRIPTOR = _DELETEUSERINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.DeleteUserInput) + )) +_sym_db.RegisterMessage(DeleteUserInput) + +UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict( + DESCRIPTOR = _UPDATEUSERSTATUSINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput) + )) +_sym_db.RegisterMessage(UpdateUserStatusInput) + +ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict( + DESCRIPTOR = _RESENDCONFIRMCODEINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput) + )) +_sym_db.RegisterMessage(ResendConfirmCodeInput) + +ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict( + DESCRIPTOR = _CONFIRMINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.ConfirmInput) + )) +_sym_db.RegisterMessage(ConfirmInput) + +DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict( + DESCRIPTOR = _DPHEARTBEATOUTPUT, + __module__ = 'mainServer_pb2' + # 
@@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput) + )) +_sym_db.RegisterMessage(DPHeartbeatOutput) + +DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict( + DESCRIPTOR = _DPGETPHYSICALDISKSINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput) + )) +_sym_db.RegisterMessage(DPGetPhysicalDisksInput) + +DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict( + DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput) + )) +_sym_db.RegisterMessage(DPGetDisksPredictionInput) + +DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict( + DESCRIPTOR = _DPBINARYOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput) + )) +_sym_db.RegisterMessage(DPBinaryOutput) + +CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict( + DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput) + )) +_sym_db.RegisterMessage(CollectionHeartbeatOutput) + +PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict( + DESCRIPTOR = _POSTMETRICSINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.PostMetricsInput) + )) +_sym_db.RegisterMessage(PostMetricsInput) + +PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict( + DESCRIPTOR = _POSTDBRELAYINPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput) + )) +_sym_db.RegisterMessage(PostDBRelayInput) + +CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict( + DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT, + __module__ = 'mainServer_pb2' + # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput) + )) +_sym_db.RegisterMessage(CollectionMessageOutput) + + +_TESTOUTPUT_MAPVALUEENTRY.has_options = True +_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')) + +_GENERAL = _descriptor.ServiceDescriptor( + name='General', + full_name='proto.General', + file=DESCRIPTOR, + index=0, + options=None, + serialized_start=2081, + serialized_end=2342, + methods=[ + _descriptor.MethodDescriptor( + name='GeneralHeartbeat', + full_name='proto.General.GeneralHeartbeat', + index=0, + containing_service=None, + input_type=_EMPTY, + output_type=_GENERALHEARTBEATOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')), + ), + _descriptor.MethodDescriptor( + name='Ping', + full_name='proto.General.Ping', + index=1, + containing_service=None, + input_type=_EMPTY, + output_type=_PINGOUTOUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')), + ), + _descriptor.MethodDescriptor( + name='Test', + full_name='proto.General.Test', + index=2, + containing_service=None, + input_type=_TESTINPUT, + output_type=_TESTOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), 
_b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')), + ), +]) +_sym_db.RegisterServiceDescriptor(_GENERAL) + +DESCRIPTOR.services_by_name['General'] = _GENERAL + + +_ACCOUNT = _descriptor.ServiceDescriptor( + name='Account', + full_name='proto.Account', + file=DESCRIPTOR, + index=1, + options=None, + serialized_start=2345, + serialized_end=3149, + methods=[ + _descriptor.MethodDescriptor( + name='AccountHeartbeat', + full_name='proto.Account.AccountHeartbeat', + index=0, + containing_service=None, + input_type=_EMPTY, + output_type=_ACCOUNTHEARTBEATOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')), + ), + _descriptor.MethodDescriptor( + name='Login', + full_name='proto.Account.Login', + index=1, + containing_service=None, + input_type=_LOGININPUT, + output_type=_USEROUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')), + ), + _descriptor.MethodDescriptor( + name='Signup', + full_name='proto.Account.Signup', + index=2, + containing_service=None, + input_type=_SINGUPINPUT, + output_type=_SINGUPOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')), + ), + _descriptor.MethodDescriptor( + name='ResendConfirmCode', + full_name='proto.Account.ResendConfirmCode', + index=3, + containing_service=None, + input_type=_RESENDCONFIRMCODEINPUT, + output_type=_GENERALMSGOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')), + ), + _descriptor.MethodDescriptor( + name='Confirm', + full_name='proto.Account.Confirm', + index=4, + containing_service=None, + input_type=_CONFIRMINPUT, + output_type=_GENERALMSGOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')), + ), + _descriptor.MethodDescriptor( + name='GetUsersByStatus', + full_name='proto.Account.GetUsersByStatus', + index=5, + containing_service=None, + input_type=_GETUSERSBYSTATUSINPUT, + output_type=_GETUSERSBYSTATUSOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')), + ), + _descriptor.MethodDescriptor( + name='DeleteUser', + full_name='proto.Account.DeleteUser', + index=6, + containing_service=None, + input_type=_DELETEUSERINPUT, + output_type=_GENERALMSGOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')), + ), + _descriptor.MethodDescriptor( + name='UpdateUserStatus', + full_name='proto.Account.UpdateUserStatus', + index=7, + containing_service=None, + input_type=_UPDATEUSERSTATUSINPUT, + output_type=_GENERALMSGOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')), + ), +]) +_sym_db.RegisterServiceDescriptor(_ACCOUNT) + +DESCRIPTOR.services_by_name['Account'] = _ACCOUNT + + +_DISKPROPHET = _descriptor.ServiceDescriptor( + name='Diskprophet', + full_name='proto.Diskprophet', + file=DESCRIPTOR, + index=2, + options=None, + serialized_start=3152, + serialized_end=3487, + methods=[ + _descriptor.MethodDescriptor( + name='DPHeartbeat', + full_name='proto.Diskprophet.DPHeartbeat', + index=0, + containing_service=None, + input_type=_EMPTY, + 
output_type=_DPHEARTBEATOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')), + ), + _descriptor.MethodDescriptor( + name='DPGetPhysicalDisks', + full_name='proto.Diskprophet.DPGetPhysicalDisks', + index=1, + containing_service=None, + input_type=_DPGETPHYSICALDISKSINPUT, + output_type=_DPBINARYOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')), + ), + _descriptor.MethodDescriptor( + name='DPGetDisksPrediction', + full_name='proto.Diskprophet.DPGetDisksPrediction', + index=2, + containing_service=None, + input_type=_DPGETDISKSPREDICTIONINPUT, + output_type=_DPBINARYOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')), + ), +]) +_sym_db.RegisterServiceDescriptor(_DISKPROPHET) + +DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET + + +_COLLECTION = _descriptor.ServiceDescriptor( + name='Collection', + full_name='proto.Collection', + file=DESCRIPTOR, + index=3, + options=None, + serialized_start=3490, + serialized_end=3837, + methods=[ + _descriptor.MethodDescriptor( + name='CollectionHeartbeat', + full_name='proto.Collection.CollectionHeartbeat', + index=0, + containing_service=None, + input_type=_EMPTY, + output_type=_COLLECTIONHEARTBEATOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')), + ), + _descriptor.MethodDescriptor( + name='PostDBRelay', + full_name='proto.Collection.PostDBRelay', + index=1, + containing_service=None, + input_type=_POSTDBRELAYINPUT, + output_type=_COLLECTIONMESSAGEOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')), + ), + _descriptor.MethodDescriptor( + name='PostMetrics', + full_name='proto.Collection.PostMetrics', + index=2, + containing_service=None, + input_type=_POSTMETRICSINPUT, + output_type=_COLLECTIONMESSAGEOUTPUT, + options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')), + ), +]) +_sym_db.RegisterServiceDescriptor(_COLLECTION) + +DESCRIPTOR.services_by_name['Collection'] = _COLLECTION + +# @@protoc_insertion_point(module_scope) diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py new file mode 100644 index 00000000000..c1c32178a6a --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py @@ -0,0 +1,395 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +import client_pb2 as mainServer__pb2 + + +class GeneralStub(object): + """-------------------------- General ------------------------------------- + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.GeneralHeartbeat = channel.unary_unary( + '/proto.General/GeneralHeartbeat', + request_serializer=mainServer__pb2.Empty.SerializeToString, + response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString, + ) + self.Ping = channel.unary_unary( + '/proto.General/Ping', + request_serializer=mainServer__pb2.Empty.SerializeToString, + response_deserializer=mainServer__pb2.PingOutout.FromString, + ) + self.Test = channel.unary_unary( + '/proto.General/Test', + request_serializer=mainServer__pb2.TestInput.SerializeToString, + response_deserializer=mainServer__pb2.TestOutput.FromString, + ) + + +class GeneralServicer(object): + """-------------------------- General ------------------------------------- + """ + + def GeneralHeartbeat(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Ping(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Test(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_GeneralServicer_to_server(servicer, server): + rpc_method_handlers = { + 'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler( + servicer.GeneralHeartbeat, + request_deserializer=mainServer__pb2.Empty.FromString, + response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString, + ), + 'Ping': grpc.unary_unary_rpc_method_handler( + servicer.Ping, + request_deserializer=mainServer__pb2.Empty.FromString, + response_serializer=mainServer__pb2.PingOutout.SerializeToString, + ), + 'Test': grpc.unary_unary_rpc_method_handler( + servicer.Test, + request_deserializer=mainServer__pb2.TestInput.FromString, + response_serializer=mainServer__pb2.TestOutput.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'proto.General', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class AccountStub(object): + """-------------------------- SERVER ACCOUNT ------------------------------ + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.AccountHeartbeat = channel.unary_unary( + '/proto.Account/AccountHeartbeat', + request_serializer=mainServer__pb2.Empty.SerializeToString, + response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString, + ) + self.Login = channel.unary_unary( + '/proto.Account/Login', + request_serializer=mainServer__pb2.LoginInput.SerializeToString, + response_deserializer=mainServer__pb2.UserOutput.FromString, + ) + self.Signup = channel.unary_unary( + '/proto.Account/Signup', + request_serializer=mainServer__pb2.SingupInput.SerializeToString, + response_deserializer=mainServer__pb2.SingupOutput.FromString, + ) + self.ResendConfirmCode = channel.unary_unary( + '/proto.Account/ResendConfirmCode', + request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString, + response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, + ) + self.Confirm = channel.unary_unary( + '/proto.Account/Confirm', + request_serializer=mainServer__pb2.ConfirmInput.SerializeToString, + response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, + ) + self.GetUsersByStatus = channel.unary_unary( + '/proto.Account/GetUsersByStatus', + request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString, + response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString, + ) + self.DeleteUser = channel.unary_unary( + '/proto.Account/DeleteUser', + request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString, + response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, + ) + self.UpdateUserStatus = channel.unary_unary( + '/proto.Account/UpdateUserStatus', + request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString, + response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString, + ) + + +class AccountServicer(object): + """-------------------------- SERVER ACCOUNT ------------------------------ + """ + + def AccountHeartbeat(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Login(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Signup(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ResendConfirmCode(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Confirm(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetUsersByStatus(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeleteUser(self, request, context): + # missing associated 
documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def UpdateUserStatus(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_AccountServicer_to_server(servicer, server): + rpc_method_handlers = { + 'AccountHeartbeat': grpc.unary_unary_rpc_method_handler( + servicer.AccountHeartbeat, + request_deserializer=mainServer__pb2.Empty.FromString, + response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString, + ), + 'Login': grpc.unary_unary_rpc_method_handler( + servicer.Login, + request_deserializer=mainServer__pb2.LoginInput.FromString, + response_serializer=mainServer__pb2.UserOutput.SerializeToString, + ), + 'Signup': grpc.unary_unary_rpc_method_handler( + servicer.Signup, + request_deserializer=mainServer__pb2.SingupInput.FromString, + response_serializer=mainServer__pb2.SingupOutput.SerializeToString, + ), + 'ResendConfirmCode': grpc.unary_unary_rpc_method_handler( + servicer.ResendConfirmCode, + request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString, + response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, + ), + 'Confirm': grpc.unary_unary_rpc_method_handler( + servicer.Confirm, + request_deserializer=mainServer__pb2.ConfirmInput.FromString, + response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, + ), + 'GetUsersByStatus': grpc.unary_unary_rpc_method_handler( + servicer.GetUsersByStatus, + request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString, + response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString, + ), + 'DeleteUser': grpc.unary_unary_rpc_method_handler( + servicer.DeleteUser, + request_deserializer=mainServer__pb2.DeleteUserInput.FromString, + response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, + ), + 'UpdateUserStatus': grpc.unary_unary_rpc_method_handler( + servicer.UpdateUserStatus, + request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString, + response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'proto.Account', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class DiskprophetStub(object): + """------------------------ SERVER DISKPROPHET --------------------------- + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.DPHeartbeat = channel.unary_unary( + '/proto.Diskprophet/DPHeartbeat', + request_serializer=mainServer__pb2.Empty.SerializeToString, + response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString, + ) + self.DPGetPhysicalDisks = channel.unary_unary( + '/proto.Diskprophet/DPGetPhysicalDisks', + request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString, + response_deserializer=mainServer__pb2.DPBinaryOutput.FromString, + ) + self.DPGetDisksPrediction = channel.unary_unary( + '/proto.Diskprophet/DPGetDisksPrediction', + request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString, + response_deserializer=mainServer__pb2.DPBinaryOutput.FromString, + ) + + +class DiskprophetServicer(object): + """------------------------ SERVER DISKPROPHET --------------------------- + """ + + def DPHeartbeat(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DPGetPhysicalDisks(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DPGetDisksPrediction(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_DiskprophetServicer_to_server(servicer, server): + rpc_method_handlers = { + 'DPHeartbeat': grpc.unary_unary_rpc_method_handler( + servicer.DPHeartbeat, + request_deserializer=mainServer__pb2.Empty.FromString, + response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString, + ), + 'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler( + servicer.DPGetPhysicalDisks, + request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString, + response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString, + ), + 'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler( + servicer.DPGetDisksPrediction, + request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString, + response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'proto.Diskprophet', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + +class CollectionStub(object): + """------------------------ SERVER Collection --------------------------- + + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.CollectionHeartbeat = channel.unary_unary( + '/proto.Collection/CollectionHeartbeat', + request_serializer=mainServer__pb2.Empty.SerializeToString, + response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString, + ) + self.PostDBRelay = channel.unary_unary( + '/proto.Collection/PostDBRelay', + request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString, + response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString, + ) + self.PostMetrics = channel.unary_unary( + '/proto.Collection/PostMetrics', + request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString, + response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString, + ) + + +class CollectionServicer(object): + """------------------------ SERVER Collection --------------------------- + + """ + + def CollectionHeartbeat(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PostDBRelay(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PostMetrics(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CollectionServicer_to_server(servicer, server): + rpc_method_handlers = { + 'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler( + servicer.CollectionHeartbeat, + request_deserializer=mainServer__pb2.Empty.FromString, + response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString, + ), + 'PostDBRelay': grpc.unary_unary_rpc_method_handler( + servicer.PostDBRelay, + request_deserializer=mainServer__pb2.PostDBRelayInput.FromString, + response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString, + ), + 'PostMetrics': grpc.unary_unary_rpc_method_handler( + servicer.PostMetrics, + request_deserializer=mainServer__pb2.PostMetricsInput.FromString, + response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'proto.Collection', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py new file mode 100644 index 00000000000..41997a36300 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py @@ -0,0 +1,461 @@ +""" +Ceph database API + +""" +from __future__ import absolute_import + +import json +import rbd +from mgr_module import CommandResult + +GB = 1024 * 1024 * 1024 + + +RBD_FEATURES_NAME_MAPPING = { + rbd.RBD_FEATURE_LAYERING: 'layering', + rbd.RBD_FEATURE_STRIPINGV2: 'striping', + rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock', + rbd.RBD_FEATURE_OBJECT_MAP: 'object-map', + rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff', + rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten', + rbd.RBD_FEATURE_JOURNALING: 'journaling', + rbd.RBD_FEATURE_DATA_POOL: 'data-pool', + rbd.RBD_FEATURE_OPERATIONS: 'operations', +} + + +def differentiate(data1, data2): + """ + # >>> times = [0, 2] + # >>> values = [100, 101] 
+ # >>> differentiate(*zip(times, values)) + 0.5 + """ + return (data2[1] - data1[1]) / float(data2[0] - data1[0]) + + +class ClusterAPI(object): + + def __init__(self, module_obj): + self.module = module_obj + + @staticmethod + def format_bitmask(features): + """ + Formats the bitmask: + # >>> format_bitmask(45) + ['deep-flatten', 'exclusive-lock', 'layering', 'object-map'] + """ + names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items() + if key & features == key] + return sorted(names) + + def _open_connection(self, pool_name='device_health_metrics'): + pools = self.module.rados.list_pools() + is_pool = False + for pool in pools: + if pool == pool_name: + is_pool = True + break + if not is_pool: + self.module.log.debug('create %s pool' % pool_name) + # create pool + result = CommandResult('') + self.module.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd pool create', + 'format': 'json', + 'pool': pool_name, + 'pg_num': 1, + }), '') + r, outb, outs = result.wait() + assert r == 0 + + # set pool application + result = CommandResult('') + self.module.send_command(result, 'mon', '', json.dumps({ + 'prefix': 'osd pool application enable', + 'format': 'json', + 'pool': pool_name, + 'app': 'mgr_devicehealth', + }), '') + r, outb, outs = result.wait() + assert r == 0 + + ioctx = self.module.rados.open_ioctx(pool_name) + return ioctx + + @classmethod + def _rbd_disk_usage(cls, image, snaps, whole_object=True): + class DUCallback(object): + def __init__(self): + self.used_size = 0 + + def __call__(self, offset, length, exists): + if exists: + self.used_size += length + snap_map = {} + prev_snap = None + total_used_size = 0 + for _, size, name in snaps: + image.set_snap(name) + du_callb = DUCallback() + image.diff_iterate(0, size, prev_snap, du_callb, + whole_object=whole_object) + snap_map[name] = du_callb.used_size + total_used_size += du_callb.used_size + prev_snap = name + return total_used_size, snap_map + + def _rbd_image(self, ioctx, pool_name, image_name): + with rbd.Image(ioctx, image_name) as img: + stat = img.stat() + stat['name'] = image_name + stat['id'] = img.id() + stat['pool_name'] = pool_name + features = img.features() + stat['features'] = features + stat['features_name'] = self.format_bitmask(features) + + # the following keys are deprecated + del stat['parent_pool'] + del stat['parent_name'] + stat['timestamp'] = '{}Z'.format(img.create_timestamp() + .isoformat()) + stat['stripe_count'] = img.stripe_count() + stat['stripe_unit'] = img.stripe_unit() + stat['data_pool'] = None + try: + parent_info = img.parent_info() + stat['parent'] = { + 'pool_name': parent_info[0], + 'image_name': parent_info[1], + 'snap_name': parent_info[2] + } + except rbd.ImageNotFound: + # no parent image + stat['parent'] = None + # snapshots + stat['snapshots'] = [] + for snap in img.list_snaps(): + snap['timestamp'] = '{}Z'.format( + img.get_snap_timestamp(snap['id']).isoformat()) + snap['is_protected'] = img.is_protected_snap(snap['name']) + snap['used_bytes'] = None + snap['children'] = [] + img.set_snap(snap['name']) + for child_pool_name, child_image_name in img.list_children(): + snap['children'].append({ + 'pool_name': child_pool_name, + 'image_name': child_image_name + }) + stat['snapshots'].append(snap) + # disk usage + if 'fast-diff' in stat['features_name']: + snaps = [(s['id'], s['size'], s['name']) + for s in stat['snapshots']] + snaps.sort(key=lambda s: s[0]) + snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)] + total_prov_bytes, snaps_prov_bytes = 
self._rbd_disk_usage(
+                    img, snaps, True)
+                stat['total_disk_usage'] = total_prov_bytes
+                for snap, prov_bytes in snaps_prov_bytes.items():
+                    if snap is None:
+                        stat['disk_usage'] = prov_bytes
+                        continue
+                    for ss in stat['snapshots']:
+                        if ss['name'] == snap:
+                            ss['disk_usage'] = prov_bytes
+                            break
+            else:
+                stat['total_disk_usage'] = None
+                stat['disk_usage'] = None
+        return stat
+
+    def get_rbd_list(self, pool_name=None):
+        if pool_name:
+            pools = [pool_name]
+        else:
+            pools = []
+            for data in self.get_osd_pools():
+                pools.append(data['pool_name'])
+        result = []
+        for pool in pools:
+            rbd_inst = rbd.RBD()
+            with self._open_connection(str(pool)) as ioctx:
+                names = rbd_inst.list(ioctx)
+                for name in names:
+                    try:
+                        stat = self._rbd_image(ioctx, pool, name)
+                    except rbd.ImageNotFound:
+                        continue
+                    result.append(stat)
+        return result
+
+    def get_object_pg_info(self, pool_name, object_name):
+        result = CommandResult('')
+        data_json = {}
+        self.module.send_command(
+            result, 'mon', '', json.dumps({
+                'prefix': 'osd map',
+                'format': 'json',
+                'pool': pool_name,
+                'object': object_name,
+            }), '')
+        ret, outb, outs = result.wait()
+        try:
+            if outb:
+                data_json = json.loads(outb)
+            else:
+                self.module.log.error('unable to get %s pg info' % pool_name)
+        except Exception as e:
+            self.module.log.error(
+                'unable to get %s pg, error: %s' % (pool_name, str(e)))
+        return data_json
+
+    @staticmethod
+    def _list_objects(ioctx, image_id):
+        objects = []
+        object_iterator = ioctx.list_objects()
+        while True:
+            try:
+                rados_object = object_iterator.next()
+                if image_id is None:
+                    objects.append(str(rados_object.key))
+                else:
+                    v = str(rados_object.key).split('.')
+                    if len(v) >= 2 and v[1] == image_id:
+                        objects.append(str(rados_object.key))
+            except StopIteration:
+                break
+        return objects
+
+    def get_rbd_info(self, pool_name, image_name):
+        with self._open_connection(pool_name) as ioctx:
+            try:
+                stat = self._rbd_image(ioctx, pool_name, image_name)
+                if stat.get('id'):
+                    objects = self._list_objects(ioctx, stat.get('id'))
+                    if objects:
+                        stat['objects'] = objects
+                        stat['pgs'] = list()
+                        for obj_name in objects:
+                            pgs_data = self.get_object_pg_info(pool_name, obj_name)
+                            stat['pgs'].extend([pgs_data])
+            except rbd.ImageNotFound:
+                stat = {}
+        return stat
+
+    def get_pool_objects(self, pool_name, image_id=None):
+        # list all objects in the pool, optionally filtered by rbd image id
+        try:
+            with self._open_connection(pool_name) as ioctx:
+                objects = self._list_objects(ioctx, image_id)
+        except Exception:
+            objects = []
+        return objects
+
+    def get_ceph_df_state(self):
+        ceph_stats = self.module.get('df').get('stats', {})
+        if not ceph_stats:
+            return {'total_size': 0, 'avail_size': 0, 'raw_used_size': 0, 'used_percent': 0}
+        total_size = round(float(ceph_stats.get('total_bytes', 0)) / GB)
+        avail_size = round(float(ceph_stats.get('total_avail_bytes', 0)) / GB, 2)
+        raw_used_size = round(float(ceph_stats.get('total_used_bytes', 0)) / GB, 2)
+        raw_used_percent = round(float(raw_used_size) / float(total_size) * 100, 2)
+        return {'total_size': total_size, 'avail_size': avail_size, 'raw_used_size': raw_used_size,
+                'used_percent': raw_used_percent}
+
+    def get_osd_metadata(self, osd_id=None):
+        if osd_id is not None:
+            return self.module.get('osd_metadata')[str(osd_id)]
+        return self.module.get('osd_metadata')
+
+    def get_mgr_metadata(self, mgr_id):
+        return self.module.get_metadata('mgr', mgr_id)
+
+    def get_osd_epoch(self):
+        return self.module.get('osd_map').get('epoch', 0)
+
+    def get_osds(self):
+        return self.module.get('osd_map').get('osds', [])
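+    # A minimal usage sketch for the accessors above (illustrative only: the
+    # pool and object names are made up, and `mgr` stands for whatever mgr
+    # module instance owns this helper):
+    #
+    #     api = ClusterAPI(mgr)
+    #     pg_info = api.get_object_pg_info('rbd', 'rbd_data.1234.000000000000')
+    #     up_osds = pg_info.get('up', [])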
+    def get_max_osd(self):
+        return self.module.get('osd_map').get('max_osd', 0)
+
+    def get_osd_pools(self):
+        return self.module.get('osd_map').get('pools', [])
+
+    def get_pool_bytes_used(self, pool_id):
+        bytes_used = None
+        pools = self.module.get('df').get('pools', [])
+        for pool in pools:
+            if pool_id == pool['id']:
+                bytes_used = pool['stats']['bytes_used']
+        return bytes_used
+
+    def get_cluster_id(self):
+        return self.module.get('mon_map').get('fsid')
+
+    def get_health_status(self):
+        health = json.loads(self.module.get('health')['json'])
+        return health.get('status')
+
+    def get_health_checks(self):
+        health = json.loads(self.module.get('health')['json'])
+        if health.get('checks'):
+            message = ''
+            checks = health['checks']
+            for key in checks.keys():
+                if message:
+                    message += ';'
+                if checks[key].get('summary', {}).get('message', ''):
+                    message += checks[key]['summary']['message']
+            return message
+        else:
+            return ''
+
+    def get_mons(self):
+        return self.module.get('mon_map').get('mons', [])
+
+    def get_mon_status(self):
+        mon_status = json.loads(self.module.get('mon_status')['json'])
+        return mon_status
+
+    def get_osd_smart(self, osd_id, device_id=None):
+        osd_devices = []
+        osd_smart = {}
+        devices = self.module.get('devices')
+        for dev in devices.get('devices', []):
+            osd = ''
+            daemons = dev.get('daemons', [])
+            for daemon in daemons:
+                if daemon[4:] != str(osd_id):
+                    continue
+                osd = daemon
+            if not osd:
+                continue
+            if dev.get('devid') and dev.get('devid') not in osd_devices:
+                osd_devices.append(dev.get('devid'))
+        for dev_id in osd_devices:
+            o_key = ''
+            if device_id and dev_id != device_id:
+                continue
+            smart_data = self.get_device_health(dev_id)
+            if smart_data:
+                o_key = sorted(smart_data.keys(), reverse=True)[0]
+            if o_key and smart_data and smart_data.values():
+                dev_smart = smart_data[o_key]
+                if dev_smart:
+                    osd_smart[dev_id] = dev_smart
+        return osd_smart
+
+    def get_device_health(self, devid):
+        health_data = {}
+        try:
+            r, outb, outs = self.module.remote('devicehealth', 'show_device_metrics', devid=devid, sample='')
+            if r != 0:
+                self.module.log.error('failed to get device %s health', devid)
+                health_data = {}
+            else:
+                health_data = json.loads(outb)
+        except Exception as e:
+            self.module.log.error('failed to get device %s health data due to %s', devid, str(e))
+        return health_data
+
+    def get_osd_hostname(self, osd_id):
+        result = ''
+        osd_metadata = self.get_osd_metadata(osd_id)
+        if osd_metadata:
+            osd_host = osd_metadata.get('hostname', 'None')
+            result = osd_host
+        return result
+
+    def get_osd_device_id(self, osd_id):
+        result = {}
+        if not str(osd_id).isdigit():
+            if str(osd_id)[0:4] == 'osd.':
+                osdid = osd_id[4:]
+            else:
+                raise Exception('not a valid id or number')
+        else:
+            osdid = osd_id
+        osd_metadata = self.get_osd_metadata(osdid)
+        if osd_metadata:
+            osd_device_ids = osd_metadata.get('device_ids', '')
+            if osd_device_ids:
+                result = {}
+                for osd_device_id in osd_device_ids.split(','):
+                    dev_name = ''
+                    if len(str(osd_device_id).split('=')) >= 2:
+                        dev_name = osd_device_id.split('=')[0]
+                        dev_id = osd_device_id.split('=')[1]
+                    else:
+                        dev_id = osd_device_id
+                    if dev_name:
+                        result[dev_name] = {'dev_id': dev_id}
+        return result
+
+    def get_file_systems(self):
+        return self.module.get('fs_map').get('filesystems', [])
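+    # The two helpers below wrap the mon commands 'device set-life-expectancy'
+    # and 'device rm-life-expectancy'; calling the first is roughly equivalent
+    # to the CLI (device id and dates illustrative):
+    #
+    #     ceph device set-life-expectancy <devid> 2018-01-01 2018-02-12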
+    def set_device_life_expectancy(self, device_id, from_date, to_date=None):
+        result = CommandResult('')
+
+        if to_date is None:
+            self.module.send_command(result, 'mon', '', json.dumps({
+                'prefix': 'device set-life-expectancy',
+                'devid': device_id,
+                'from': from_date
+            }), '')
+        else:
+            self.module.send_command(result, 'mon', '', json.dumps({
+                'prefix': 'device set-life-expectancy',
+                'devid': device_id,
+                'from': from_date,
+                'to': to_date
+            }), '')
+        ret, outb, outs = result.wait()
+        if ret != 0:
+            self.module.log.error(
+                'failed to set device life expectancy, %s' % outs)
+        return ret
+
+    def reset_device_life_expectancy(self, device_id):
+        result = CommandResult('')
+        self.module.send_command(result, 'mon', '', json.dumps({
+            'prefix': 'device rm-life-expectancy',
+            'devid': device_id
+        }), '')
+        ret, outb, outs = result.wait()
+        if ret != 0:
+            self.module.log.error(
+                'failed to reset device life expectancy, %s' % outs)
+        return ret
+
+    def get_server(self, hostname):
+        return self.module.get_server(hostname)
+
+    def get_configuration(self, key):
+        return self.module.get_configuration(key)
+
+    def get_rate(self, svc_type, svc_name, path):
+        """returns most recent rate"""
+        data = self.module.get_counter(svc_type, svc_name, path)[path]
+
+        if data and len(data) > 1:
+            return differentiate(*data[-2:])
+        return 0.0
+
+    def get_latest(self, daemon_type, daemon_name, counter):
+        return self.module.get_latest(daemon_type, daemon_name, counter)
+
+    def get_pgs_up_by_poolid(self, poolid):
+        pgs = {}
+        try:
+            osd_map = self.module.get_osdmap()
+            if not osd_map:
+                return {}
+            pgs = osd_map.map_pool_pgs_up(int(poolid))
+            return pgs
+        except Exception:
+            return {}
diff --git a/src/pybind/mgr/diskprediction_cloud/common/cypher.py b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
new file mode 100644
index 00000000000..7b7b60e5059
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
@@ -0,0 +1,77 @@
+from __future__ import absolute_import

+import time


+class NodeInfo(object):
+    """ Neo4j Node information """
+    def __init__(self, label, domain_id, name, meta):
+        self.label = label
+        self.domain_id = domain_id
+        self.name = name
+        self.meta = meta


+class CypherOP(object):
+    """ Cypher Operation """
+
+    @staticmethod
+    def update(node, key, value, timestamp=None):
+        # evaluate the default per call; a timestamp default argument would
+        # be frozen at import time
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        result = ''
+        if isinstance(node, NodeInfo):
+            if key != 'time':
+                cy_value = '\'%s\'' % value
+            else:
+                cy_value = value
+            result = \
+                'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % (
+                    node.label, key, node.label, timestamp, node.label, key,
+                    cy_value)
+        return result
+
+    @staticmethod
+    def create_or_merge(node, timestamp=None):
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        result = ''
+        if isinstance(node, NodeInfo):
+            meta_list = []
+            if isinstance(node.meta, dict):
+                for key, value in node.meta.items():
+                    meta_list.append(CypherOP.update(node, key, value, timestamp))
+            domain_id = '{domainId:\'%s\'}' % node.domain_id
+            if meta_list:
+                result = 'merge (%s:%s %s) %s %s %s' % (
+                    node.label, node.label,
+                    domain_id,
+                    CypherOP.update(node, 'name', node.name, timestamp),
+                    ' '.join(meta_list),
+                    CypherOP.update(node, 'time', timestamp, timestamp))
+            else:
+                result = 'merge (%s:%s %s) %s %s' % (
+                    node.label, node.label,
+                    domain_id,
+                    CypherOP.update(node, 'name', node.name, timestamp),
+                    CypherOP.update(node, 'time', timestamp, timestamp))
+        return result
+
+    @staticmethod
+    def add_link(snode, dnode, relationship, timestamp=None):
+        result = ''
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo):
+            cy_snode = CypherOP.create_or_merge(snode, timestamp)
+            cy_dnode = CypherOP.create_or_merge(dnode, timestamp)
+            target = snode.label +
dnode.label + link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % ( + snode.label, target, relationship, + dnode.label, target, + target, timestamp, + target, timestamp) + result = '%s %s %s' % (cy_snode, cy_dnode, link) + return result diff --git a/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py new file mode 100644 index 00000000000..9a9f0a12ba8 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py @@ -0,0 +1,238 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 +import grpc +import json +from logging import getLogger +import time + +from . import DummyResonse +from . import client_pb2 +from . import client_pb2_grpc + + +def gen_configuration(**kwargs): + configuration = { + 'host': kwargs.get('host', 'api.diskprophet.com'), + 'user': kwargs.get('user'), + 'password': kwargs.get('password'), + 'port': kwargs.get('port', 31400), + 'mgr_inst': kwargs.get('mgr_inst', None), + 'cert_context': kwargs.get('cert_context'), + 'ssl_target_name': kwargs.get('ssl_target_name', 'api.diskprophet.com'), + 'default_authority': kwargs.get('default_authority', 'api.diskprophet.com')} + return configuration + + +class GRPcClient: + + def __init__(self, configuration): + self.auth = None + self.host = configuration.get('host') + self.port = configuration.get('port') + if configuration.get('user') and configuration.get('password'): + self.auth = ( + ('account', configuration.get('user')), + ('password', configuration.get('password'))) + self.cert_context = configuration.get('cert_context') + self.ssl_target_name = configuration.get('ssl_target_name') + self.default_authority = configuration.get('default_authority') + self.mgr_inst = configuration.get('mgr_inst') + if self.mgr_inst: + self._logger = self.mgr_inst.log + else: + self._logger = getLogger() + self._client = self._get_channel() + + def close(self): + if self._client: + self._client.close() + + @staticmethod + def connectivity_update(connectivity): + pass + + def _get_channel(self): + try: + creds = grpc.ssl_channel_credentials( + root_certificates=self.cert_context) + channel = \ + grpc.secure_channel('{}:{}'.format( + self.host, self.port), creds, + options=(('grpc.ssl_target_name_override', self.ssl_target_name,), + ('grpc.default_authority', self.default_authority),)) + channel.subscribe(self.connectivity_update, try_to_connect=True) + return channel + except Exception as e: + self._logger.error( + 'failed to create connection exception: {}'.format( + ';'.join(str(e).split('\n\t')))) + return None + + def test_connection(self): + try: + stub_accout = client_pb2_grpc.AccountStub(self._client) + result = stub_accout.AccountHeartbeat(client_pb2.Empty()) + if result and "is alive" in str(result.message): + return True + else: + return False + except Exception as e: + self._logger.error( + 'failed to test connection exception: {}'.format( + ';'.join(str(e).split('\n\t')))) + return False + + def _send_metrics(self, data, measurement): + status_info = dict() + status_info['measurement'] = None + status_info['success_count'] = 0 + status_info['failure_count'] = 0 + for dp_data in data: + d_measurement = dp_data.measurement + if not d_measurement: + status_info['measurement'] = measurement + else: + status_info['measurement'] = d_measurement + tag_list = [] + field_list = [] + for name in dp_data.tags: + tag = '{}={}'.format(name, dp_data.tags[name]) + tag_list.append(tag) + for name in dp_data.fields: + if 
dp_data.fields[name] is None: + continue + if isinstance(dp_data.fields[name], str): + field = '{}=\"{}\"'.format(name, dp_data.fields[name]) + elif isinstance(dp_data.fields[name], bool): + field = '{}={}'.format(name, + str(dp_data.fields[name]).lower()) + elif (isinstance(dp_data.fields[name], int) or + isinstance(dp_data.fields[name], long)): + field = '{}={}i'.format(name, dp_data.fields[name]) + else: + field = '{}={}'.format(name, dp_data.fields[name]) + field_list.append(field) + data = '{},{} {} {}'.format( + status_info['measurement'], + ','.join(tag_list), + ','.join(field_list), + int(time.time() * 1000 * 1000 * 1000)) + try: + resp = self._send_info(data=[data], measurement=status_info['measurement']) + status_code = resp.status_code + if 200 <= status_code < 300: + self._logger.debug( + '{} send diskprediction api success(ret: {})'.format( + status_info['measurement'], status_code)) + status_info['success_count'] += 1 + else: + self._logger.error( + 'return code: {}, content: {}'.format( + status_code, resp.content)) + status_info['failure_count'] += 1 + except Exception as e: + status_info['failure_count'] += 1 + self._logger.error(str(e)) + return status_info + + def _send_db_relay(self, data, measurement): + status_info = dict() + status_info['measurement'] = measurement + status_info['success_count'] = 0 + status_info['failure_count'] = 0 + for dp_data in data: + try: + resp = self._send_info( + data=[dp_data.fields['cmd']], measurement=measurement) + status_code = resp.status_code + if 200 <= status_code < 300: + self._logger.debug( + '{} send diskprediction api success(ret: {})'.format( + measurement, status_code)) + status_info['success_count'] += 1 + else: + self._logger.error( + 'return code: {}, content: {}'.format( + status_code, resp.content)) + status_info['failure_count'] += 1 + except Exception as e: + status_info['failure_count'] += 1 + self._logger.error(str(e)) + return status_info + + def send_info(self, data, measurement): + """ + :param data: data structure + :param measurement: data measurement class name + :return: + status_info = { + 'success_count': , + 'failure_count': + } + """ + if measurement == 'db_relay': + return self._send_db_relay(data, measurement) + else: + return self._send_metrics(data, measurement) + + def _send_info(self, data, measurement): + resp = DummyResonse() + try: + stub_collection = client_pb2_grpc.CollectionStub(self._client) + if measurement == 'db_relay': + result = stub_collection.PostDBRelay( + client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth) + else: + result = stub_collection.PostMetrics( + client_pb2.PostMetricsInput(points=data), metadata=self.auth) + if result and 'success' in str(result.message).lower(): + resp.status_code = 200 + resp.content = '' + else: + resp.status_code = 400 + resp.content = ';'.join(str(result).split('\n\t')) + self._logger.error( + 'failed to send info: {}'.format(resp.content)) + except Exception as e: + resp.status_code = 400 + resp.content = ';'.join(str(e).split('\n\t')) + self._logger.error( + 'failed to send info exception: {}'.format(resp.content)) + return resp + + def query_info(self, host_domain_id, disk_domain_id, measurement): + resp = DummyResonse() + try: + stub_dp = client_pb2_grpc.DiskprophetStub(self._client) + predicted = stub_dp.DPGetDisksPrediction( + client_pb2.DPGetDisksPredictionInput( + physicalDiskIds=disk_domain_id), + metadata=self.auth) + if predicted and hasattr(predicted, 'data'): + resp.status_code = 200 + resp.content = '' + resp_json = 
json.loads(predicted.data) + rc = resp_json.get('results', []) + if rc: + series = rc[0].get('series', []) + if series: + values = series[0].get('values', []) + if not values: + resp.resp_json = {} + else: + columns = series[0].get('columns', []) + for item in values: + # get prediction key and value from server. + for name, value in zip(columns, item): + # process prediction data + resp.resp_json[name] = value + return resp + else: + resp.status_code = 400 + resp.content = '' + resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))} + return resp + except Exception as e: + resp.status_code = 400 + resp.content = ';'.join(str(e).split('\n\t')) + resp.resp_json = {'error': resp.content} + return resp diff --git a/src/pybind/mgr/diskprediction_cloud/common/server.crt b/src/pybind/mgr/diskprediction_cloud/common/server.crt new file mode 100644 index 00000000000..d72c9d2fd18 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/common/server.crt @@ -0,0 +1,17 @@ +-----BEGIN CERTIFICATE----- +MIICsjCCAZoCCQCKLjrHOzCTrDANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBh +cGkuZmVkZXJhdG9yLmFpMB4XDTE4MDgwMjA2NDg0N1oXDTI4MDczMDA2NDg0N1ow +GzEZMBcGA1UEAwwQYXBpLmZlZGVyYXRvci5haTCCASIwDQYJKoZIhvcNAQEBBQAD +ggEPADCCAQoCggEBAJkDL/VoLbI+Rc1GXkZwpN8n4e7HhIk1iK98yhXegoH8g6ZZ +uVVlUW/zNO0V5W9IgiSBqEWOEf9heWj7mIbbxl437W1LpR4V0LKR2dbY7ZMwlB3L +ZJYxtziZYu1g4Fn9hDnVJIXVmXFpF62wHd2ZSY7FyUF/OGetxLSfoOMkTHY8A8HB +92vQfoFjgx1e23lLgTO2VpucmU/qXiF+xI/K6kkrMnGJi4xBL29i3aKRRNktVUHf +Zs6JhBKl4sbvkW5m5AECW4c0XxVJotTLoPUjx4rxp0k5S1aQSYSS+0z96eVY0w8J +ungiWEj7lLqwEGKjOzfjDLsczZIcZZcQSQwb3qcCAwEAATANBgkqhkiG9w0BAQsF +AAOCAQEADwfBrHsvPmUD8CTx8lpVcqrOlHc7ftW3hb11vWwwfJw4fBiJ8DoB496x +SAP2CJyDnSLdyvVueKLjiRFBm96W76nbMeP9+CkktGRUbLjkByv/v+7WSxRrukDC +yR6IXqQJe4ADcYkVYoUMx3frBQzFtS7hni0FPvl3AN55TvTXqed61CdN9zdw9Ezn +yn0oy3BbT5h/zNHefTQBzgQhW62C5YdTRtS6VVWV/k1kLz0GVG1eMtAqueUCxFeM +g1mXYz2/Cm5C8pszZfiP+a/QV1z/3QgRUp0i0yVLiteqNDCPv6bc767VQEuXok9p +NDuKElVxdA0WD9cbnBXiyfeMOQnjQw== +-----END CERTIFICATE----- diff --git a/src/pybind/mgr/diskprediction_cloud/module.py b/src/pybind/mgr/diskprediction_cloud/module.py new file mode 100644 index 00000000000..204184ba1a1 --- /dev/null +++ b/src/pybind/mgr/diskprediction_cloud/module.py @@ -0,0 +1,437 @@ +""" +diskprediction with cloud predictor +""" +from __future__ import absolute_import + +from datetime import datetime +import errno +import json +from mgr_module import MgrModule +import os +from threading import Event + +from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED +from .task import MetricsRunner, SmartRunner, TestRunner + +TIME_DAYS = 24*60*60 +TIME_WEEK = TIME_DAYS * 7 +DP_AGENTS = [MetricsRunner, SmartRunner] + + +class Module(MgrModule): + + OPTIONS = [ + { + 'name': 'diskprediction_server', + 'default': '' + }, + { + 'name': 'diskprediction_port', + 'default': '31400' + }, + { + 'name': 'diskprediction_user', + 'default': '' + }, + { + 'name': 'diskprediction_password', + 'default': '' + }, + { + 'name': 'diskprediction_upload_metrics_interval', + 'default': '600' + }, + { + 'name': 'diskprediction_upload_smart_interval', + 'default': '43200' + }, + { + 'name': 'diskprediction_retrieve_prediction_interval', + 'default': '43200' + }, + { + 'name': 'diskprediction_cert_context', + 'default': '' + }, + { + 'name': 'diskprediction_ssl_target_name_override', + 'default': 'api.diskprophet.com' + }, + { + 'name': 'diskprediction_default_authority', + 'default': 'api.diskprophet.com' + }, + { + 'name': 'sleep_interval', + 'default': str(600), + }, + { + 'name': 
'predict_interval',
+            'default': str(86400),
+        }
+    ]
+
+    COMMANDS = [
+        {
+            'cmd': 'device show-prediction-config',
+            'desc': 'Prints diskprediction configuration',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'device set-cloud-prediction-config '
+                   'name=server,type=CephString,req=true '
+                   'name=user,type=CephString,req=true '
+                   'name=password,type=CephString,req=true '
+                   'name=certfile,type=CephString,req=true '
+                   'name=port,type=CephString,req=false ',
+            'desc': 'Configure Disk Prediction service',
+            'perm': 'rw'
+        },
+        {
+            'cmd': 'device debug metrics-forced',
+            'desc': 'Run metrics agent forced',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'device debug smart-forced',
+            'desc': 'Run smart agent forced',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'diskprediction_cloud status',
+            'desc': 'Check diskprediction_cloud status',
+            'perm': 'r'
+        }
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super(Module, self).__init__(*args, **kwargs)
+        self.status = {'status': DP_MGR_STAT_DISABLED}
+        self.shutdown_event = Event()
+        self._agents = []
+        self._activated_cloud = False
+        self._prediction_result = {}
+        self.config = dict()
+
+    @property
+    def config_keys(self):
+        return dict((o['name'], o.get('default', None)) for o in self.OPTIONS)
+
+    def set_config_option(self, option, value):
+        if option not in self.config_keys.keys():
+            raise RuntimeError('{0} is an unknown configuration '
+                               'option'.format(option))
+
+        if option in ['diskprediction_port',
+                      'diskprediction_upload_metrics_interval',
+                      'diskprediction_upload_smart_interval',
+                      'diskprediction_retrieve_prediction_interval']:
+            if not str(value).isdigit():
+                raise RuntimeError('invalid {} configured. Please specify '
+                                   'a valid integer {}'.format(option, value))
+
+        self.log.debug('Setting in-memory config option %s to: %s', option,
+                       value)
+        self.set_config(option, value)
+        self.config[option] = value
+
+        return True
+
+    def get_configuration(self, key):
+        return self.get_config(key, self.config_keys[key])
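+    # Worked example for the conversion below (values illustrative; the exact
+    # date also depends on the local timezone): a predicted timestamp of
+    # 1514764800 * 10**9 ns (2018-01-01 UTC) plus a six-week life expectancy
+    # (6 * 7 * 24 * 60 * 60 = 3628800 s) yields '2018-02-12'.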
+    @staticmethod
+    def _convert_timestamp(predicted_timestamp, life_expectancy_day):
+        """
+        :param predicted_timestamp: unit is nanoseconds
+        :param life_expectancy_day: unit is seconds
+        :return: date string in '%Y-%m-%d' format, e.g. 2018-01-01
+        """
+        return datetime.fromtimestamp(
+            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')
+
+    def _show_prediction_config(self, cmd):
+        self.show_module_config()
+        return 0, json.dumps(self.config, indent=4), ''
+
+    def _set_ssl_target_name(self, cmd):
+        str_ssl_target = cmd.get('ssl_target_name', '')
+        try:
+            self.set_config('diskprediction_ssl_target_name_override', str_ssl_target)
+            return 0, 'succeeded to configure ssl target name', ''
+        except Exception as e:
+            return -errno.EINVAL, '', str(e)
+
+    def _set_ssl_default_authority(self, cmd):
+        str_ssl_authority = cmd.get('ssl_authority', '')
+        try:
+            self.set_config('diskprediction_default_authority', str_ssl_authority)
+            return 0, 'succeeded to configure ssl default authority', ''
+        except Exception as e:
+            return -errno.EINVAL, '', str(e)
+
+    def _set_cloud_prediction_config(self, cmd):
+        str_cert_path = cmd.get('certfile', '')
+        if os.path.exists(str_cert_path):
+            with open(str_cert_path, 'rb') as f:
+                trusted_certs = f.read()
+            self.set_config_option(
+                'diskprediction_cert_context', trusted_certs)
+            for _agent in self._agents:
+                _agent.event.set()
+            self.set_config('diskprediction_server', cmd['server'])
+            self.set_config('diskprediction_user', cmd['user'])
+            self.set_config('diskprediction_password', cmd['password'])
+            if cmd.get('port'):
+                self.set_config('diskprediction_port', cmd['port'])
+            return 0, 'succeeded to configure cloud mode connection', ''
+        else:
+            return -errno.EINVAL, '', 'certificate file does not exist'
+
+    def _debug_metrics_forced(self, cmd):
+        msg = ''
+        for _agent in self._agents:
+            if isinstance(_agent, MetricsRunner):
+                msg = 'run metrics agent successfully'
+                _agent.event.set()
+        return 0, msg, ''
+
+    def _debug_smart_forced(self, cmd):
+        msg = ''
+        for _agent in self._agents:
+            if isinstance(_agent, SmartRunner):
+                msg = 'run smart agent successfully'
+                _agent.event.set()
+        return 0, msg, ''
+
+    def refresh_config(self):
+        for opt in self.OPTIONS:
+            setattr(self,
+                    opt['name'],
+                    self.get_config(opt['name']) or opt['default'])
+            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
+
+    def _status(self, cmd):
+        return 0, json.dumps(self.status), ''
+
+    def _get_cloud_prediction_result(self, host_domain_id, disk_domain_id):
+        result = {}
+        obj_sender = None
+        from .common.grpcclient import GRPcClient, gen_configuration
+        conf = gen_configuration(
+            host=self.get_configuration('diskprediction_server'),
+            user=self.get_configuration('diskprediction_user'),
+            password=self.get_configuration('diskprediction_password'),
+            port=self.get_configuration('diskprediction_port'),
+            cert_context=self.get_configuration('diskprediction_cert_context'),
+            mgr_inst=self,
+            ssl_target_name=self.get_configuration('diskprediction_ssl_target_name_override'),
+            default_authority=self.get_configuration('diskprediction_default_authority'))
+        try:
+            obj_sender = GRPcClient(conf)
+            if obj_sender:
+                query_info = obj_sender.query_info(host_domain_id, disk_domain_id, 'sai_disk_prediction')
+                status_code = query_info.status_code
+                if status_code == 200:
+                    result = query_info.json()
+                else:
+                    resp = query_info.json()
+                    if resp.get('error'):
+                        self.log.error(str(resp['error']))
+        finally:
+            if obj_sender:
+                obj_sender.close()
+        return result
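+    # The prediction payload consumed by the methods below is expected to
+    # decode to something like (field values illustrative):
+    #
+    #     {'near_failure': 'Good', 'predicted': 1514764800000000000, ...}
+    #
+    # where near_failure is one of Good/Warning/Bad and predicted is a
+    # nanosecond timestamp.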
+    def predict_life_expectancy(self, devid):
+        assert devid
+        self.refresh_config()
+        prediction_data = {}
+        result = self.get('device {}'.format(devid))
+        if not result:
+            return -1, '', 'device {} not found'.format(devid)
+        dev_info = result.get('device', {})
+        if not dev_info:
+            return -1, '', 'device {} not found'.format(devid)
+        cluster_id = self.get('mon_map').get('fsid', '')
+        for location in dev_info.get('location', []):
+            host = location.get('host')
+            host_domain_id = '{}_{}'.format(cluster_id, host)
+            prediction_data = self._get_cloud_prediction_result(host_domain_id, devid)
+            if prediction_data:
+                break
+        if not prediction_data:
+            return -1, '', 'device {} prediction data not ready'.format(devid)
+        elif prediction_data.get('near_failure', '').lower() == 'good':
+            return 0, '>6w', ''
+        elif prediction_data.get('near_failure', '').lower() == 'warning':
+            return 0, '>=2w and <=6w', ''
+        elif prediction_data.get('near_failure', '').lower() == 'bad':
+            return 0, '<2w', ''
+        else:
+            return 0, 'unknown', ''
+
+    def _update_device_life_expectancy_day(self, devid, prediction):
+        # Update osd life-expectancy
+        from .common.clusterdata import ClusterAPI
+        predicted = None
+        life_expectancy_day_min = None
+        life_expectancy_day_max = None
+        if prediction.get('predicted'):
+            predicted = int(prediction['predicted'])
+        if prediction.get('near_failure'):
+            if prediction['near_failure'].lower() == 'good':
+                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
+                life_expectancy_day_max = None
+            elif prediction['near_failure'].lower() == 'warning':
+                life_expectancy_day_min = (TIME_WEEK * 2)
+                life_expectancy_day_max = (TIME_WEEK * 6)
+            elif prediction['near_failure'].lower() == 'bad':
+                life_expectancy_day_min = 0
+                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
+            else:
+                # Near failure state is unknown.
+                predicted = None
+                life_expectancy_day_min = None
+                life_expectancy_day_max = None
+
+        obj_api = ClusterAPI(self)
+        if predicted and devid and life_expectancy_day_min is not None:
+            from_date = None
+            to_date = None
+            try:
+                if life_expectancy_day_min is not None:
+                    from_date = self._convert_timestamp(predicted, life_expectancy_day_min)
+
+                if life_expectancy_day_max is not None:
+                    to_date = self._convert_timestamp(predicted, life_expectancy_day_max)
+
+                obj_api.set_device_life_expectancy(devid, from_date, to_date)
+                self.log.info(
+                    'succeeded to set device {} life expectancy from: {}, to: {}'.format(
+                        devid, from_date, to_date))
+            except Exception as e:
+                self.log.error(
+                    'failed to set device {} life expectancy from: {}, to: {}, {}'.format(
+                        devid, from_date, to_date, str(e)))
+        else:
+            obj_api.reset_device_life_expectancy(devid)
+
+    def predict_all_devices(self):
+        if not self._activated_cloud:
+            return -1, '', 'diskprediction_cloud not ready'
+        prediction_data = {}
+        self.refresh_config()
+        result = self.get('devices')
+        cluster_id = self.get('mon_map').get('fsid')
+        if not result:
+            return -1, '', 'unable to get all devices for prediction'
+        for dev in result.get('devices', []):
+            for location in dev.get('location', []):
+                host = location.get('host')
+                host_domain_id = '{}_{}'.format(cluster_id, host)
+                prediction_data = self._get_cloud_prediction_result(host_domain_id, dev.get('devid'))
+                if prediction_data:
+                    break
+            if not prediction_data:
+                return -1, '', 'device {} prediction data not ready'.format(dev.get('devid'))
+            else:
+                self._update_device_life_expectancy_day(dev.get('devid'), prediction_data)
+        return 0, '', ''
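+    # Dispatch example: the registered command 'device show-prediction-config'
+    # skips the 'device' token and resolves to self._show_prediction_config,
+    # while 'diskprediction_cloud status' resolves to self._status.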
+    def handle_command(self, _, cmd):
+        for o_cmd in self.COMMANDS:
+            if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]:
+                fun_name = ''
+                args = o_cmd['cmd'].split(' ')
+                for arg in args:
+                    if arg.lower() == 'diskprediction_cloud':
+                        continue
+                    if arg.lower() == 'device':
+                        continue
+                    if '=' in arg or ',' in arg or not arg:
+                        continue
+                    fun_name += '_%s' % arg.replace('-', '_')
+                if fun_name:
+                    fun = getattr(
+                        self, fun_name)
+                    if fun:
+                        return fun(cmd)
+        return -errno.EINVAL, '', 'cmd not found'
+
+    def show_module_config(self):
+        for key, default in self.config_keys.items():
+            self.set_config_option(key, self.get_config(key, default))
+
+    def serve(self):
+        self.log.info('Starting diskprediction module')
+        self.status = {'status': DP_MGR_STAT_ENABLED}
+
+        while True:
+            self.refresh_config()
+            mode = self.get_option('device_failure_prediction_mode')
+            if mode == 'cloud':
+                if not self._activated_cloud:
+                    self.start_cloud_disk_prediction()
+            else:
+                if self._activated_cloud:
+                    self.stop_disk_prediction()
+
+            # Check whether any agent has hung.
+            restart_agent = False
+            try:
+                for dp_agent in self._agents:
+                    if dp_agent.is_timeout():
+                        self.log.error('agent name: {} timeout'.format(dp_agent.task_name))
+                        restart_agent = True
+                        break
+            except Exception:
+                self.log.error('disk prediction plugin failed to check agents and will try to restart them')
+                restart_agent = True
+
+            if restart_agent:
+                self.stop_disk_prediction()
+            else:
+                sleep_interval = int(self.sleep_interval) or 60
+                self.shutdown_event.wait(sleep_interval)
+                if self.shutdown_event.is_set():
+                    break
+        self.stop_disk_prediction()
+
+    def start_cloud_disk_prediction(self):
+        assert not self._activated_cloud
+        for dp_agent in DP_AGENTS:
+            obj_agent = dp_agent(self, 300)
+            if obj_agent:
+                obj_agent.start()
+            else:
+                raise Exception('failed to start task %s' % obj_agent.task_name)
+            self._agents.append(obj_agent)
+        self._activated_cloud = True
+        self.log.info('start cloud disk prediction')
+
+    def stop_disk_prediction(self):
+        assert self._activated_cloud
+        try:
+            self.status = {'status': DP_MGR_STAT_DISABLED}
+            while self._agents:
+                dp_agent = self._agents.pop()
+                self.log.info('agent name: {}'.format(dp_agent.task_name))
+                dp_agent.terminate()
+                dp_agent.join(5)
+                del dp_agent
+            self._activated_cloud = False
+            self.log.info('stop disk prediction')
+        except Exception:
+            self.log.error('failed to stop disk prediction cloud plugin')
+
+    def shutdown(self):
+        self.shutdown_event.set()
+        super(Module, self).shutdown()
+
+    def self_test(self):
+        obj_test = TestRunner(self)
+        obj_test.run()
+        self.log.info('self test completed')
diff --git a/src/pybind/mgr/diskprediction_cloud/requirements.txt b/src/pybind/mgr/diskprediction_cloud/requirements.txt
new file mode 100644
index 00000000000..3a22a072eba
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/requirements.txt
@@ -0,0 +1,14 @@
+google==2.0.1
+google-api-python-client==1.7.3
+google-auth==1.5.0
+google-auth-httplib2==0.0.3
+google-gax==0.12.5
+googleapis-common-protos==1.5.3
+grpc==0.3.post19
+grpc-google-logging-v2==0.8.1
+grpc-google-pubsub-v1==0.8.1
+grpcio==1.14.1
+mock==2.0.0
+numpy==1.15.1
+scikit-learn==0.19.2
+scipy==1.1.0
diff --git a/src/pybind/mgr/diskprediction_cloud/task.py b/src/pybind/mgr/diskprediction_cloud/task.py
new file mode 100644
index 00000000000..d03450ed614
--- /dev/null
+++ b/src/pybind/mgr/diskprediction_cloud/task.py
@@ -0,0 +1,171 @@
+from __future__ import absolute_import

+import time
+from threading import Event, Thread

+from .agent.metrics.ceph_cluster import CephClusterAgent
+from .agent.metrics.ceph_mon_osd import CephMonOsdAgent
+from .agent.metrics.ceph_pool import CephPoolAgent
+from .agent.metrics.db_relay import DBRelayAgent
+from .agent.metrics.sai_agent import SAIAgent
+from .agent.metrics.sai_cluster import SAICluserAgent
+from
.agent.metrics.sai_disk import SAIDiskAgent +from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent +from .agent.metrics.sai_host import SAIHostAgent +from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING + + +class AgentRunner(Thread): + + task_name = '' + interval_key = '' + agents = [] + + def __init__(self, mgr_module, agent_timeout=60): + """ + + :param mgr_module: parent ceph mgr module + :param agent_timeout: (unit seconds) agent execute timeout value, default: 60 secs + """ + Thread.__init__(self) + self._agent_timeout = agent_timeout + self._module_inst = mgr_module + self._log = mgr_module.log + self._start_time = time.time() + self._th = None + + self.exit = False + self.event = Event() + self.task_interval = \ + int(self._module_inst.get_configuration(self.interval_key)) + + def terminate(self): + self.exit = True + self.event.set() + self._log.info('PDS terminate %s complete' % self.task_name) + + def run(self): + self._start_time = time.time() + self._log.info( + 'start %s, interval: %s' + % (self.task_name, self.task_interval)) + while not self.exit: + self.run_agents() + if self.event: + self.event.wait(int(self.task_interval)) + self.event.clear() + self._log.info( + 'completed %s(%s)' % (self.task_name, time.time()-self._start_time)) + + def run_agents(self): + obj_sender = None + try: + self._log.debug('run_agents %s' % self.task_name) + from .common.grpcclient import GRPcClient, gen_configuration + conf = gen_configuration( + host=self._module_inst.get_configuration('diskprediction_server'), + user=self._module_inst.get_configuration('diskprediction_user'), + password=self._module_inst.get_configuration( + 'diskprediction_password'), + port=self._module_inst.get_configuration('diskprediction_port'), + cert_context=self._module_inst.get_configuration('diskprediction_cert_context'), + mgr_inst=self._module_inst, + ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'), + default_authority=self._module_inst.get_configuration('diskprediction_default_authority')) + obj_sender = GRPcClient(conf) + if not obj_sender: + self._log.error('invalid diskprediction sender') + self._module_inst.status = \ + {'status': DP_MGR_STAT_FAILED, + 'reason': 'invalid diskprediction sender'} + raise Exception('invalid diskprediction sender') + if obj_sender.test_connection(): + self._module_inst.status = {'status': DP_MGR_STAT_OK} + self._log.debug('succeed to test connection') + self._run(self._module_inst, obj_sender) + else: + self._log.error('failed to test connection') + self._module_inst.status = \ + {'status': DP_MGR_STAT_FAILED, + 'reason': 'failed to test connection'} + raise Exception('failed to test connection') + except Exception as e: + self._module_inst.status = \ + {'status': DP_MGR_STAT_FAILED, + 'reason': 'failed to start %s agents, %s' + % (self.task_name, str(e))} + self._log.error( + 'failed to start %s agents, %s' % (self.task_name, str(e))) + raise + finally: + if obj_sender: + obj_sender.close() + + def is_timeout(self): + now = time.time() + if (now - self._start_time) > self._agent_timeout: + return True + else: + return False + + def _run(self, module_inst, sender): + self._log.debug('%s run' % self.task_name) + for agent in self.agents: + self._start_time = time.time() + retry_count = 3 + while retry_count: + retry_count -= 1 + try: + obj_agent = agent(module_inst, sender, self._agent_timeout) + obj_agent.run() + del obj_agent + break + except Exception as e: + if str(e).find('configuring') >= 0: + 
self._log.debug( + 'failed to execute {}, {}, retry again.'.format( + agent.measurement, str(e))) + time.sleep(1) + continue + else: + module_inst.status = \ + {'status': DP_MGR_STAT_WARNING, + 'reason': 'failed to execute {}, {}'.format( + agent.measurement, ';'.join(str(e).split('\n\t')))} + self._log.warning( + 'failed to execute {}, {}'.format( + agent.measurement, ';'.join(str(e).split('\n\t')))) + break + + +class MetricsRunner(AgentRunner): + + task_name = 'Metrics Agent' + interval_key = 'diskprediction_upload_metrics_interval' + agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent, + SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent, + SAIAgent] + + +class SmartRunner(AgentRunner): + + task_name = 'Smart data Agent' + interval_key = 'diskprediction_upload_smart_interval' + agents = [SAIDiskSmartAgent] + + +class TestRunner(object): + task_name = 'Test Agent' + interval_key = 'diskprediction_upload_metrics_interval' + agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent, + SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent, + SAIAgent, SAIDiskSmartAgent] + + def __init__(self, mgr_module): + self._module_inst = mgr_module + + def run(self): + for agent in self.agents: + obj_agent = agent(self._module_inst, None) + obj_agent.run() + del obj_agent
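As a quick smoke test of the new cloud transport, the gRPC client can also be
driven outside of ceph-mgr. A minimal sketch, assuming src/pybind/mgr is on
PYTHONPATH and that the host, account and password placeholders below are
replaced with real values (cert_context wants the PEM bytes of server.crt):

    from diskprediction_cloud.common.grpcclient import GRPcClient, gen_configuration

    conf = gen_configuration(
        host='api.diskprophet.com', port=31400,
        user='<account>', password='<password>',
        cert_context=open('server.crt', 'rb').read())
    client = GRPcClient(conf)
    print('connection ok: %s' % client.test_connection())
    client.close()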