mgr: Separate diskprediction cloud plugin from the diskprediction plugin
author    hsiang41 <rick.chen@prophetstor.com>
          Wed, 7 Nov 2018 14:05:35 +0000 (22:05 +0800)
committer Sage Weil <sage@redhat.com>
          Fri, 16 Nov 2018 06:15:41 +0000 (00:15 -0600)
Separate the cloud-based prediction support out of the diskprediction plugin
into its own diskprediction_cloud module. Devicehealth invokes the device
prediction function selected by the global configuration option
"device_failure_prediction_mode".
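A minimal sketch of how a consumer such as the devicehealth module might
dispatch on that option; the method name "predict_life_expectancy", the
option values, and the helper shape are assumptions for illustration, not
taken from this diff:

    # Hypothetical sketch, not part of this commit. 'module' is assumed
    # to be an MgrModule instance; option values assumed none/local/cloud.
    def predict_device(module, devid):
        mode = module.get_ceph_option('device_failure_prediction_mode')
        if mode == 'local':
            return module.remote('diskprediction_local',
                                 'predict_life_expectancy', devid=devid)
        if mode == 'cloud':
            return module.remote('diskprediction_cloud',
                                 'predict_life_expectancy', devid=devid)
        return None  # 'none': prediction disabled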

Signed-off-by: Rick Chen <rick.chen@prophetstor.com>
128 files changed:
doc/mgr/diskprediction.rst
qa/tasks/mgr/test_module_selftest.py
src/pybind/mgr/diskprediction/__init__.py [deleted file]
src/pybind/mgr/diskprediction/agent/__init__.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/__init__.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/db_relay.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py [deleted file]
src/pybind/mgr/diskprediction/agent/metrics/sai_host.py [deleted file]
src/pybind/mgr/diskprediction/agent/predict/__init__.py [deleted file]
src/pybind/mgr/diskprediction/agent/predict/prediction.py [deleted file]
src/pybind/mgr/diskprediction/common/__init__.py [deleted file]
src/pybind/mgr/diskprediction/common/client_pb2.py [deleted file]
src/pybind/mgr/diskprediction/common/client_pb2_grpc.py [deleted file]
src/pybind/mgr/diskprediction/common/clusterdata.py [deleted file]
src/pybind/mgr/diskprediction/common/cypher.py [deleted file]
src/pybind/mgr/diskprediction/common/grpcclient.py [deleted file]
src/pybind/mgr/diskprediction/common/localpredictor.py [deleted file]
src/pybind/mgr/diskprediction/module.py [deleted file]
src/pybind/mgr/diskprediction/predictor/__init__.py [deleted file]
src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py [deleted file]
src/pybind/mgr/diskprediction/predictor/models/config.json [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl [deleted file]
src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl [deleted file]
src/pybind/mgr/diskprediction/requirements.txt [deleted file]
src/pybind/mgr/diskprediction/task.py [deleted file]
src/pybind/mgr/diskprediction/test/__init__.py [deleted file]
src/pybind/mgr/diskprediction/test/test_agents.py [deleted file]
src/pybind/mgr/diskprediction_cloud/__init__.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/__init__.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/__init__.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/client_pb2.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/clusterdata.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/cypher.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/grpcclient.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/common/server.crt [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/module.py [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/requirements.txt [new file with mode: 0644]
src/pybind/mgr/diskprediction_cloud/task.py [new file with mode: 0644]

index bbc163f5c2eefb734d2773d125aa03b1f78b3de1..eefe6ad8527c838fcc87e5178391ae7fc8d9534b 100644
@@ -12,7 +12,8 @@ Enabling
 Run the following command to enable the *diskprediction* module in the Ceph
 environment::
 
-    ceph mgr module enable diskprediction
+    ceph mgr module enable diskprediction_cloud
+    ceph mgr module enable diskprediction_local
 
 
 Select the prediction mode::
index 780f4751239401761780b52173e6b12e8552d11f..26673971b64ee168964f6c09e2276c7b8ac47efd 100644
@@ -47,8 +47,11 @@ class TestModuleSelftest(MgrTestCase):
     def test_influx(self):
         self._selftest_plugin("influx")
 
-    def test_diskprediction(self):
-        self._selftest_plugin("diskprediction")
+    def test_diskprediction_local(self):
+        self._selftest_plugin("diskprediction_local")
+
+    def test_diskprediction_cloud(self):
+        self._selftest_plugin("diskprediction_cloud")
 
     def test_telegraf(self):
         self._selftest_plugin("telegraf")
diff --git a/src/pybind/mgr/diskprediction/__init__.py b/src/pybind/mgr/diskprediction/__init__.py
deleted file mode 100644
index e65bbfb..0000000
+++ /dev/null
@@ -1,2 +0,0 @@
-from __future__ import absolute_import
-from .module import Module
diff --git a/src/pybind/mgr/diskprediction/agent/__init__.py b/src/pybind/mgr/diskprediction/agent/__init__.py
deleted file mode 100644
index 64a456f..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import absolute_import\r
-\r
-from ..common import timeout, TimeoutError\r
-\r
-\r
-class BaseAgent(object):\r
-\r
-    measurement = ''\r
-\r
-    def __init__(self, mgr_module, obj_sender, timeout=30):\r
-        self.data = []\r
-        self._client = None\r
-        self._client = obj_sender\r
-        self._logger = mgr_module.log\r
-        self._module_inst = mgr_module\r
-        self._timeout = timeout\r
-\r
-    def run(self):\r
-        try:\r
-            self._collect_data()\r
-            self._run()\r
-        except TimeoutError:\r
-            self._logger.error('{} failed to execute {} task'.format(\r
-                __name__, self.measurement))\r
-\r
-    def __nonzero__(self):\r
-        if not self._module_inst and not self._client:\r
-            return False\r
-        else:\r
-            return True\r
-\r
-    @timeout()\r
-    def _run(self):\r
-        pass\r
-\r
-    @timeout()\r
-    def _collect_data(self):\r
-        pass\r
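BaseAgent wraps both of its hooks in a timeout() decorator imported from the
plugin's common package, which this page does not show. A minimal sketch of
what such a decorator might look like, assuming a signal-based
implementation that honours the instance's _timeout attribute:

    # Sketch only; the real decorator lived in the (deleted)
    # common/__init__.py. SIGALRM works on the Unix main thread only.
    import signal
    from functools import wraps

    class TimeoutError(Exception):
        pass

    def timeout(seconds=30):
        def decorator(func):
            @wraps(func)
            def wrapper(self, *args, **kwargs):
                def handler(signum, frame):
                    raise TimeoutError()
                old_handler = signal.signal(signal.SIGALRM, handler)
                signal.alarm(int(getattr(self, '_timeout', seconds)))
                try:
                    return func(self, *args, **kwargs)
                finally:
                    signal.alarm(0)
                    signal.signal(signal.SIGALRM, old_handler)
            return wrapper
        return decorator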
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction/agent/metrics/__init__.py
deleted file mode 100644
index 57fbfd5..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-from __future__ import absolute_import\r
-\r
-from .. import BaseAgent\r
-from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK\r
-\r
-AGENT_VERSION = '1.0.0'\r
-\r
-\r
-class MetricsField(object):\r
-    def __init__(self):\r
-        self.tags = {}\r
-        self.fields = {}\r
-        self.timestamp = None\r
-\r
-    def __str__(self):\r
-        return str({\r
-            'tags': self.tags,\r
-            'fields': self.fields,\r
-            'timestamp': self.timestamp\r
-        })\r
-\r
-\r
-class MetricsAgent(BaseAgent):\r
-\r
-    def log_summary(self, status_info):\r
-        try:\r
-            if status_info:\r
-                measurement = status_info['measurement']\r
-                success_count = status_info['success_count']\r
-                failure_count = status_info['failure_count']\r
-                total_count = success_count + failure_count\r
-                display_string = \\r
-                    '%s agent stats in total count: %s, success count: %s, failure count: %s.'\r
-                self._logger.info(\r
-                    display_string % (measurement, total_count, success_count, failure_count)\r
-                )\r
-        except Exception as e:\r
-            self._logger.error(str(e))\r
-\r
-    def _run(self):\r
-        collect_data = self.data\r
-        result = {}\r
-        if collect_data:\r
-            status_info = self._client.send_info(collect_data, self.measurement)\r
-            # show summary info\r
-            self.log_summary(status_info)\r
-            # write sub_agent buffer\r
-            total_count = status_info['success_count'] + status_info['failure_count']\r
-            if total_count:\r
-                if status_info['success_count'] == 0:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_FAILED,\r
-                         'reason': 'failed to send metrics data to the server'}\r
-                elif status_info['failure_count'] == 0:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_OK}\r
-                else:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_WARNING,\r
-                         'reason': 'failed to send partial metrics data to the server'}\r
-        return result\r
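MetricsField and MetricsAgent together define the collection pattern used by
every agent below: a subclass names its measurement, fills self.data with
MetricsField objects in _collect_data(), and inherits the send-and-report
logic of _run(). A hypothetical minimal agent in that style (ExampleAgent is
not part of this commit):

    import socket

    class ExampleFields(MetricsField):
        """ hypothetical measurement structure """
        measurement = 'example'

        def __init__(self):
            super(ExampleFields, self).__init__()
            self.tags['cluster_id'] = None
            self.fields['agenthost'] = None

    class ExampleAgent(MetricsAgent):
        measurement = 'example'

        def _collect_data(self):
            # fill self.data; the inherited _run() ships it via the client
            data = ExampleFields()
            data.fields['agenthost'] = socket.gethostname()
            self.data.append(data)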
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_cluster.py
deleted file mode 100644
index d49b063..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephCluster(MetricsField):\r
-    """ Ceph cluster structure """\r
-    measurement = 'ceph_cluster'\r
-\r
-    def __init__(self):\r
-        super(CephCluster, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['cluster_health'] = ''\r
-        self.fields['num_mon'] = None\r
-        self.fields['num_mon_quorum'] = None\r
-        self.fields['num_osd'] = None\r
-        self.fields['num_osd_up'] = None\r
-        self.fields['num_osd_in'] = None\r
-        self.fields['osd_epoch'] = None\r
-        self.fields['osd_bytes'] = None\r
-        self.fields['osd_bytes_used'] = None\r
-        self.fields['osd_bytes_avail'] = None\r
-        self.fields['num_pool'] = None\r
-        self.fields['num_pg'] = None\r
-        self.fields['num_pg_active_clean'] = None\r
-        self.fields['num_pg_active'] = None\r
-        self.fields['num_pg_peering'] = None\r
-        self.fields['num_object'] = None\r
-        self.fields['num_object_degraded'] = None\r
-        self.fields['num_object_misplaced'] = None\r
-        self.fields['num_object_unfound'] = None\r
-        self.fields['num_bytes'] = None\r
-        self.fields['num_mds_up'] = None\r
-        self.fields['num_mds_in'] = None\r
-        self.fields['num_mds_failed'] = None\r
-        self.fields['mds_epoch'] = None\r
-\r
-\r
-class CephClusterAgent(MetricsAgent):\r
-    measurement = 'ceph_cluster'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-\r
-        c_data = CephCluster()\r
-        cluster_state = obj_api.get_health_status()\r
-        c_data.tags['cluster_id'] = cluster_id\r
-        c_data.fields['cluster_health'] = str(cluster_state)\r
-        c_data.fields['agenthost'] = socket.gethostname()\r
-        c_data.tags['agenthost_domain_id'] = \\r
-            '%s_%s' % (cluster_id, c_data.fields['agenthost'])\r
-        c_data.fields['osd_epoch'] = obj_api.get_osd_epoch()\r
-        c_data.fields['num_mon'] = len(obj_api.get_mons())\r
-        c_data.fields['num_mon_quorum'] = \\r
-            len(obj_api.get_mon_status().get('quorum', []))\r
-\r
-        osds = obj_api.get_osds()\r
-        num_osd_up = 0\r
-        num_osd_in = 0\r
-        for osd_data in osds:\r
-            if osd_data.get('up'):\r
-                num_osd_up = num_osd_up + 1\r
-            if osd_data.get('in'):\r
-                num_osd_in = num_osd_in + 1\r
-        if osds:\r
-            c_data.fields['num_osd'] = len(osds)\r
-        else:\r
-            c_data.fields['num_osd'] = 0\r
-        c_data.fields['num_osd_up'] = num_osd_up\r
-        c_data.fields['num_osd_in'] = num_osd_in\r
-        c_data.fields['num_pool'] = len(obj_api.get_osd_pools())\r
-\r
-        df_stats = obj_api.get_df_stats()\r
-        total_bytes = df_stats.get('total_bytes', 0)\r
-        total_used_bytes = df_stats.get('total_used_bytes', 0)\r
-        total_avail_bytes = df_stats.get('total_avail_bytes', 0)\r
-        c_data.fields['osd_bytes'] = total_bytes\r
-        c_data.fields['osd_bytes_used'] = total_used_bytes\r
-        c_data.fields['osd_bytes_avail'] = total_avail_bytes\r
-        if total_bytes and total_avail_bytes:\r
-            c_data.fields['osd_bytes_used_percentage'] = \\r
-                round(float(total_used_bytes) / float(total_bytes) * 100, 4)\r
-        else:\r
-            c_data.fields['osd_bytes_used_percentage'] = 0.0000\r
-\r
-        pg_stats = obj_api.get_pg_stats()\r
-        num_bytes = 0\r
-        num_object = 0\r
-        num_object_degraded = 0\r
-        num_object_misplaced = 0\r
-        num_object_unfound = 0\r
-        num_pg_active = 0\r
-        num_pg_active_clean = 0\r
-        num_pg_peering = 0\r
-        for pg_data in pg_stats:\r
-            num_pg_active = num_pg_active + len(pg_data.get('acting'))\r
-            if 'active+clean' in pg_data.get('state'):\r
-                num_pg_active_clean = num_pg_active_clean + 1\r
-            if 'peering' in pg_data.get('state'):\r
-                num_pg_peering = num_pg_peering + 1\r
-\r
-            stat_sum = pg_data.get('stat_sum', {})\r
-            num_object = num_object + stat_sum.get('num_objects', 0)\r
-            num_object_degraded = \\r
-                num_object_degraded + stat_sum.get('num_objects_degraded', 0)\r
-            num_object_misplaced = \\r
-                num_object_misplaced + stat_sum.get('num_objects_misplaced', 0)\r
-            num_object_unfound = \\r
-                num_object_unfound + stat_sum.get('num_objects_unfound', 0)\r
-            num_bytes = num_bytes + stat_sum.get('num_bytes', 0)\r
-\r
-        c_data.fields['num_pg'] = len(pg_stats)\r
-        c_data.fields['num_object'] = num_object\r
-        c_data.fields['num_object_degraded'] = num_object_degraded\r
-        c_data.fields['num_object_misplaced'] = num_object_misplaced\r
-        c_data.fields['num_object_unfound'] = num_object_unfound\r
-        c_data.fields['num_bytes'] = num_bytes\r
-        c_data.fields['num_pg_active'] = num_pg_active\r
-        c_data.fields['num_pg_active_clean'] = num_pg_active_clean\r
-        c_data.fields['num_pg_peering'] = num_pg_peering\r
-\r
-        filesystems = obj_api.get_file_systems()\r
-        num_mds_in = 0\r
-        num_mds_up = 0\r
-        num_mds_failed = 0\r
-        mds_epoch = 0\r
-        for fs_data in filesystems:\r
-            num_mds_in = \\r
-                num_mds_in + len(fs_data.get('mdsmap', {}).get('in', []))\r
-            num_mds_up = \\r
-                num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {}))\r
-            num_mds_failed = \\r
-                num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', []))\r
-            mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0)\r
-        c_data.fields['num_mds_in'] = num_mds_in\r
-        c_data.fields['num_mds_up'] = num_mds_up\r
-        c_data.fields['num_mds_failed'] = num_mds_failed\r
-        c_data.fields['mds_epoch'] = mds_epoch\r
-        self.data.append(c_data)\r
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_mon_osd.py
deleted file mode 100644
index 0c85fb5..0000000
+++ /dev/null
@@ -1,159 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephMON(MetricsField):\r
-    """ Ceph monitor structure """\r
-    measurement = 'ceph_mon'\r
-\r
-    def __init__(self):\r
-        super(CephMON, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['mon_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['num_sessions'] = None\r
-        self.fields['session_add'] = None\r
-        self.fields['session_rm'] = None\r
-        self.fields['session_trim'] = None\r
-        self.fields['num_elections'] = None\r
-        self.fields['election_call'] = None\r
-        self.fields['election_win'] = None\r
-        self.fields['election_lose'] = None\r
-\r
-\r
-class CephOSD(MetricsField):\r
-    """ Ceph osd structure """\r
-    measurement = 'ceph_osd'\r
-\r
-    def __init__(self):\r
-        super(CephOSD, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['osd_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['host_domain_id'] = None\r
-        self.fields['op_w'] = None\r
-        self.fields['op_in_bytes'] = None\r
-        self.fields['op_r'] = None\r
-        self.fields['op_out_bytes'] = None\r
-        self.fields['op_wip'] = None\r
-        self.fields['op_latency'] = None\r
-        self.fields['op_process_latency'] = None\r
-        self.fields['op_r_latency'] = None\r
-        self.fields['op_r_process_latency'] = None\r
-        self.fields['op_w_in_bytes'] = None\r
-        self.fields['op_w_latency'] = None\r
-        self.fields['op_w_process_latency'] = None\r
-        self.fields['op_w_prepare_latency'] = None\r
-        self.fields['op_rw'] = None\r
-        self.fields['op_rw_in_bytes'] = None\r
-        self.fields['op_rw_out_bytes'] = None\r
-        self.fields['op_rw_latency'] = None\r
-        self.fields['op_rw_process_latency'] = None\r
-        self.fields['op_rw_prepare_latency'] = None\r
-        self.fields['op_before_queue_op_lat'] = None\r
-        self.fields['op_before_dequeue_op_lat'] = None\r
-\r
-\r
-class CephMonOsdAgent(MetricsAgent):\r
-    measurement = 'ceph_mon_osd'\r
-\r
-    # counter types\r
-    PERFCOUNTER_LONGRUNAVG = 4\r
-    PERFCOUNTER_COUNTER = 8\r
-    PERFCOUNTER_HISTOGRAM = 0x10\r
-    PERFCOUNTER_TYPE_MASK = ~3\r
-\r
-    def _stattype_to_str(self, stattype):\r
-        typeonly = stattype & self.PERFCOUNTER_TYPE_MASK\r
-        if typeonly == 0:\r
-            return 'gauge'\r
-        if typeonly == self.PERFCOUNTER_LONGRUNAVG:\r
-            # this lie matches the DaemonState decoding: only val, no counts\r
-            return 'counter'\r
-        if typeonly == self.PERFCOUNTER_COUNTER:\r
-            return 'counter'\r
-        if typeonly == self.PERFCOUNTER_HISTOGRAM:\r
-            return 'histogram'\r
-        return ''\r
-\r
-    def _generate_osd(self, cluster_id, service_name, perf_counts):\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        service_id = service_name[4:]\r
-        d_osd = CephOSD()\r
-        stat_bytes = 0\r
-        stat_bytes_used = 0\r
-        d_osd.tags['cluster_id'] = cluster_id\r
-        d_osd.tags['osd_id'] = service_name[4:]\r
-        d_osd.fields['agenthost'] = socket.gethostname()\r
-        d_osd.tags['agenthost_domain_id'] = \\r
-            '%s_%s' % (cluster_id, d_osd.fields['agenthost'])\r
-        d_osd.tags['host_domain_id'] = \\r
-            '%s_%s' % (cluster_id,\r
-                       obj_api.get_osd_hostname(d_osd.tags['osd_id']))\r
-        for i_key, i_val in perf_counts.iteritems():\r
-            if i_key[:4] == 'osd.':\r
-                key_name = i_key[4:]\r
-            else:\r
-                key_name = i_key\r
-            if self._stattype_to_str(i_val['type']) == 'counter':\r
-                value = obj_api.get_rate('osd', service_id, i_key)\r
-            else:\r
-                value = obj_api.get_latest('osd', service_id, i_key)\r
-            if key_name == 'stat_bytes':\r
-                stat_bytes = value\r
-            elif key_name == 'stat_bytes_used':\r
-                stat_bytes_used = value\r
-            else:\r
-                d_osd.fields[key_name] = value\r
-\r
-        if stat_bytes and stat_bytes_used:\r
-            d_osd.fields['stat_bytes_used_percentage'] = \\r
-                round(float(stat_bytes_used) / float(stat_bytes) * 100, 4)\r
-        else:\r
-            d_osd.fields['stat_bytes_used_percentage'] = 0.0000\r
-        self.data.append(d_osd)\r
-\r
-    def _generate_mon(self, cluster_id, service_name, perf_counts):\r
-        d_mon = CephMON()\r
-        d_mon.tags['cluster_id'] = cluster_id\r
-        d_mon.tags['mon_id'] = service_name[4:]\r
-        d_mon.fields['agenthost'] = socket.gethostname()\r
-        d_mon.tags['agenthost_domain_id'] = \\r
-            '%s_%s' % (cluster_id, d_mon.fields['agenthost'])\r
-        d_mon.fields['num_sessions'] = \\r
-            perf_counts.get('mon.num_sessions', {}).get('value', 0)\r
-        d_mon.fields['session_add'] = \\r
-            perf_counts.get('mon.session_add', {}).get('value', 0)\r
-        d_mon.fields['session_rm'] = \\r
-            perf_counts.get('mon.session_rm', {}).get('value', 0)\r
-        d_mon.fields['session_trim'] = \\r
-            perf_counts.get('mon.session_trim', {}).get('value', 0)\r
-        d_mon.fields['num_elections'] = \\r
-            perf_counts.get('mon.num_elections', {}).get('value', 0)\r
-        d_mon.fields['election_call'] = \\r
-            perf_counts.get('mon.election_call', {}).get('value', 0)\r
-        d_mon.fields['election_win'] = \\r
-            perf_counts.get('mon.election_win', {}).get('value', 0)\r
-        d_mon.fields['election_lose'] = \\r
-            perf_counts.get('mon.election_lose', {}).get('value', 0)\r
-        self.data.append(d_mon)\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        perf_data = obj_api.get_all_perf_counters()\r
-        if not perf_data or not isinstance(perf_data, dict):\r
-            self._logger.error('unable to get all perf counters')\r
-            return\r
-        cluster_id = obj_api.get_cluster_id()\r
-        for n_name, i_perf in perf_data.iteritems():\r
-            if n_name[0:3].lower() == 'mon':\r
-                self._generate_mon(cluster_id, n_name, i_perf)\r
-            elif n_name[0:3].lower() == 'osd':\r
-                self._generate_osd(cluster_id, n_name, i_perf)\r
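_stattype_to_str() above classifies a perf counter by masking off the low
two bits, which encode the stored value type, and comparing the remainder
against the behaviour flags. A worked illustration using the constants from
the class; treating 0x2 as the integer value-type bit is an assumption here:

    PERFCOUNTER_COUNTER = 8
    PERFCOUNTER_TYPE_MASK = ~3    # clears the two value-type bits

    stattype = PERFCOUNTER_COUNTER | 0x2   # a counter holding an integer
    typeonly = stattype & PERFCOUNTER_TYPE_MASK
    assert typeonly == PERFCOUNTER_COUNTER   # classified as 'counter'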
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction/agent/metrics/ceph_pool.py
deleted file mode 100644
index 86ee10a..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephPool(MetricsField):\r
-    """ Ceph pool structure """\r
-    measurement = 'ceph_pool'\r
-\r
-    def __init__(self):\r
-        super(CephPool, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['pool_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['bytes_used'] = None\r
-        self.fields['max_avail'] = None\r
-        self.fields['objects'] = None\r
-        self.fields['wr_bytes'] = None\r
-        self.fields['dirty'] = None\r
-        self.fields['rd_bytes'] = None\r
-        self.fields['raw_bytes_used'] = None\r
-\r
-\r
-class CephPoolAgent(MetricsAgent):\r
-    measurement = 'ceph_pool'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        df_data = obj_api.get('df')\r
-        cluster_id = obj_api.get_cluster_id()\r
-        for pool in df_data.get('pools', []):\r
-            d_pool = CephPool()\r
-            p_id = pool.get('id')\r
-            d_pool.tags['cluster_id'] = cluster_id\r
-            d_pool.tags['pool_id'] = p_id\r
-            d_pool.fields['agenthost'] = socket.gethostname()\r
-            d_pool.tags['agenthost_domain_id'] = \\r
-                '%s_%s' % (cluster_id, d_pool.fields['agenthost'])\r
-            d_pool.fields['bytes_used'] = \\r
-                pool.get('stats', {}).get('bytes_used', 0)\r
-            d_pool.fields['max_avail'] = \\r
-                pool.get('stats', {}).get('max_avail', 0)\r
-            d_pool.fields['objects'] = \\r
-                pool.get('stats', {}).get('objects', 0)\r
-            d_pool.fields['wr_bytes'] = \\r
-                pool.get('stats', {}).get('wr_bytes', 0)\r
-            d_pool.fields['dirty'] = \\r
-                pool.get('stats', {}).get('dirty', 0)\r
-            d_pool.fields['rd_bytes'] = \\r
-                pool.get('stats', {}).get('rd_bytes', 0)\r
-            d_pool.fields['raw_bytes_used'] = \\r
-                pool.get('stats', {}).get('raw_bytes_used', 0)\r
-            self.data.append(d_pool)\r
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction/agent/metrics/db_relay.py
deleted file mode 100644
index 1d5ca23..0000000
+++ /dev/null
@@ -1,610 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common import get_human_readable\r
-from ...common.clusterdata import ClusterAPI\r
-from ...common.cypher import CypherOP, NodeInfo\r
-\r
-\r
-class BaseDP(object):\r
-    """ basic diskprediction structure """\r
-    _fields = []\r
-\r
-    def __init__(self, *args, **kwargs):\r
-        if len(args) > len(self._fields):\r
-            raise TypeError('Expected {} arguments'.format(len(self._fields)))\r
-\r
-        for name, value in zip(self._fields, args):\r
-            setattr(self, name, value)\r
-\r
-        for name in self._fields[len(args):]:\r
-            setattr(self, name, kwargs.pop(name))\r
-\r
-        if kwargs:\r
-            raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs)))\r
-\r
-\r
-class MGRDpCeph(BaseDP):\r
-    _fields = [\r
-        'fsid', 'health', 'max_osd', 'size',\r
-        'avail_size', 'raw_used', 'raw_used_percent'\r
-    ]\r
-\r
-\r
-class MGRDpHost(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpMon(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpOsd(BaseDP):\r
-    _fields = [\r
-        'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr',\r
-        'cluster_addr', 'state', 'backend_filestore_dev_node',\r
-        'backend_filestore_partition_path', 'ceph_release', 'devices',\r
-        'osd_data', 'osd_journal', 'rotational'\r
-    ]\r
-\r
-\r
-class MGRDpMds(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpPool(BaseDP):\r
-    _fields = [\r
-        'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size',\r
-        'pg_num', 'pgp_num', 'created_time', 'used', 'pgids'\r
-    ]\r
-\r
-\r
-class MGRDpRBD(BaseDP):\r
-    _fields = ['fsid', '_id', 'name', 'pool_name', 'size', 'pgids']\r
-\r
-\r
-class MGRDpPG(BaseDP):\r
-    _fields = [\r
-        'fsid', 'pgid', 'up_osds', 'acting_osds', 'state',\r
-        'objects', 'degraded', 'misplaced', 'unfound'\r
-    ]\r
-\r
-\r
-class MGRDpDisk(BaseDP):\r
-    _fields = ['host_domain_id', 'model', 'size']\r
-\r
-\r
-class DBRelay(MetricsField):\r
-    """ DB Relay structure """\r
-    measurement = 'db_relay'\r
-\r
-    def __init__(self):\r
-        super(DBRelay, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['dc_tag'] = 'na'\r
-        self.tags['host'] = None\r
-        self.fields['cmd'] = None\r
-\r
-\r
-class DBRelayAgent(MetricsAgent):\r
-    measurement = 'db_relay'\r
-\r
-    def __init__(self, *args, **kwargs):\r
-        super(DBRelayAgent, self).__init__(*args, **kwargs)\r
-        self._cluster_node = self._get_cluster_node()\r
-        self._cluster_id = self._cluster_node.domain_id\r
-        self._host_nodes = dict()\r
-        self._osd_nodes = dict()\r
-\r
-    def _get_cluster_node(self):\r
-        db = ClusterAPI(self._module_inst)\r
-        cluster_id = db.get_cluster_id()\r
-        dp_cluster = MGRDpCeph(\r
-            fsid=cluster_id,\r
-            health=db.get_health_status(),\r
-            max_osd=db.get_max_osd(),\r
-            size=db.get_global_total_size(),\r
-            avail_size=db.get_global_avail_size(),\r
-            raw_used=db.get_global_raw_used_size(),\r
-            raw_used_percent=db.get_global_raw_used_percent()\r
-        )\r
-        cluster_id = db.get_cluster_id()\r
-        cluster_name = cluster_id[-12:]\r
-        cluster_node = NodeInfo(\r
-            label='CephCluster',\r
-            domain_id=cluster_id,\r
-            name='cluster-{}'.format(cluster_name),\r
-            meta=dp_cluster.__dict__\r
-        )\r
-        return cluster_node\r
-\r
-    def _cluster_contains_host(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_node = self._cluster_node\r
-\r
-        db = ClusterAPI(self._module_inst)\r
-\r
-        hosts = set()\r
-\r
-        # Add host from osd\r
-        osd_data = db.get_osds()\r
-        for _data in osd_data:\r
-            osd_id = _data['osd']\r
-            if not _data.get('in'):\r
-                continue\r
-            osd_addr = _data['public_addr'].split(':')[0]\r
-            osd_metadata = db.get_osd_metadata(osd_id)\r
-            if osd_metadata:\r
-                osd_host = osd_metadata['hostname']\r
-                hosts.add((osd_host, osd_addr))\r
-\r
-        # Add host from mon\r
-        mons = db.get_mons()\r
-        for _data in mons:\r
-            mon_host = _data['name']\r
-            mon_addr = _data['public_addr'].split(':')[0]\r
-            if mon_host:\r
-                hosts.add((mon_host, mon_addr))\r
-\r
-        # Add host from mds\r
-        file_systems = db.get_file_systems()\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap').get('info')\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                if mds_host:\r
-                    hosts.add((mds_host, mds_addr))\r
-\r
-        # create node relation\r
-        for tp in hosts:\r
-            data = DBRelay()\r
-            host = tp[0]\r
-            self._host_nodes[host] = None\r
-\r
-            host_node = NodeInfo(\r
-                label='VMHost',\r
-                domain_id='{}_{}'.format(cluster_id, host),\r
-                name=host,\r
-                meta={}\r
-            )\r
-\r
-            # add cluster-host relationship\r
-            cypher_cmd = CypherOP.add_link(\r
-                cluster_node,\r
-                host_node,\r
-                'CephClusterContainsHost'\r
-            )\r
-            cluster_host = socket.gethostname()\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = \\r
-                str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self._host_nodes[host] = host_node\r
-            self.data.append(data)\r
-\r
-    def _host_contains_mon(self):\r
-        cluster_id = self._cluster_id\r
-\r
-        db = ClusterAPI(self._module_inst)\r
-        mons = db.get_mons()\r
-        for mon in mons:\r
-            mon_name = mon.get('name', '')\r
-            mon_addr = mon.get('addr', '').split(':')[0]\r
-            for hostname in self._host_nodes:\r
-                if hostname != mon_name:\r
-                    continue\r
-\r
-                host_node = self._host_nodes[hostname]\r
-                data = DBRelay()\r
-                dp_mon = MGRDpMon(\r
-                    fsid=cluster_id,\r
-                    host=mon_name,\r
-                    ipaddr=mon_addr\r
-                )\r
-\r
-                # create mon node\r
-                mon_node = NodeInfo(\r
-                    label='CephMon',\r
-                    domain_id='{}.mon.{}'.format(cluster_id, mon_name),\r
-                    name=mon_name,\r
-                    meta=dp_mon.__dict__\r
-                )\r
-\r
-                # add mon node relationship\r
-                cypher_cmd = CypherOP.add_link(\r
-                    host_node,\r
-                    mon_node,\r
-                    'HostContainsMon'\r
-                )\r
-                cluster_host = socket.gethostname()\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-    def _host_contains_osd(self):\r
-        cluster_id = self._cluster_id\r
-\r
-        db = ClusterAPI(self._module_inst)\r
-        osd_data = db.get_osd_data()\r
-        osd_journal = db.get_osd_journal()\r
-        for _data in db.get_osds():\r
-            osd_id = _data['osd']\r
-            osd_uuid = _data['uuid']\r
-            osd_up = _data['up']\r
-            osd_in = _data['in']\r
-            if not osd_in:\r
-                continue\r
-            osd_weight = _data['weight']\r
-            osd_public_addr = _data['public_addr']\r
-            osd_cluster_addr = _data['cluster_addr']\r
-            osd_state = _data['state']\r
-            osd_metadata = db.get_osd_metadata(osd_id)\r
-            if osd_metadata:\r
-                data = DBRelay()\r
-                osd_host = osd_metadata['hostname']\r
-                osd_ceph_version = osd_metadata['ceph_version']\r
-                osd_rotational = osd_metadata['rotational']\r
-                osd_devices = osd_metadata['devices'].split(',')\r
-\r
-                # filter 'dm' device.\r
-                devices = []\r
-                for devname in osd_devices:\r
-                    if 'dm' in devname:\r
-                        continue\r
-                    devices.append(devname)\r
-\r
-                for hostname in self._host_nodes:\r
-                    if hostname != osd_host:\r
-                        continue\r
-\r
-                    self._osd_nodes[str(osd_id)] = None\r
-                    host_node = self._host_nodes[hostname]\r
-                    osd_dev_node = None\r
-                    for dev_node in ['backend_filestore_dev_node',\r
-                                     'bluestore_bdev_dev_node']:\r
-                        val = osd_metadata.get(dev_node)\r
-                        if val and val.lower() != 'unknown':\r
-                            osd_dev_node = val\r
-                            break\r
-\r
-                    osd_dev_path = None\r
-                    for dev_path in ['backend_filestore_partition_path',\r
-                                     'bluestore_bdev_partition_path']:\r
-                        val = osd_metadata.get(dev_path)\r
-                        if val and val.lower() != 'unknown':\r
-                            osd_dev_path = val\r
-                            break\r
-\r
-                    dp_osd = MGRDpOsd(\r
-                        fsid=cluster_id,\r
-                        host=osd_host,\r
-                        _id=osd_id,\r
-                        uuid=osd_uuid,\r
-                        up=osd_up,\r
-                        _in=osd_in,\r
-                        weight=osd_weight,\r
-                        public_addr=osd_public_addr,\r
-                        cluster_addr=osd_cluster_addr,\r
-                        state=','.join(osd_state),\r
-                        backend_filestore_dev_node=osd_dev_node,\r
-                        backend_filestore_partition_path=osd_dev_path,\r
-                        ceph_release=osd_ceph_version,\r
-                        osd_data=osd_data,\r
-                        osd_journal=osd_journal,\r
-                        devices=','.join(devices),\r
-                        rotational=osd_rotational)\r
-\r
-                    # create osd node\r
-                    osd_node = NodeInfo(\r
-                        label='CephOsd',\r
-                        domain_id='{}.osd.{}'.format(cluster_id, osd_id),\r
-                        name='OSD.{}'.format(osd_id),\r
-                        meta=dp_osd.__dict__\r
-                    )\r
-                    # add osd node relationship\r
-                    cypher_cmd = CypherOP.add_link(\r
-                        host_node,\r
-                        osd_node,\r
-                        'HostContainsOsd'\r
-                    )\r
-                    cluster_host = socket.gethostname()\r
-                    data.fields['agenthost'] = cluster_host\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['host'] = cluster_host\r
-                    data.fields['cmd'] = str(cypher_cmd)\r
-                    self._osd_nodes[str(osd_id)] = osd_node\r
-                    self.data.append(data)\r
-\r
-    def _host_contains_mds(self):\r
-        cluster_id = self._cluster_id\r
-\r
-        db = ClusterAPI(self._module_inst)\r
-        file_systems = db.get_file_systems()\r
-\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap').get('info')\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                mds_gid = mds_data.get('gid')\r
-\r
-                for hostname in self._host_nodes:\r
-                    if hostname != mds_host:\r
-                        continue\r
-\r
-                    data = DBRelay()\r
-                    host_node = self._host_nodes[hostname]\r
-                    dp_mds = MGRDpMds(\r
-                        fsid=cluster_id,\r
-                        host=mds_host,\r
-                        ipaddr=mds_addr\r
-                    )\r
-\r
-                    # create mds node\r
-                    mds_node = NodeInfo(\r
-                        label='CephMds',\r
-                        domain_id='{}.mds.{}'.format(cluster_id, mds_gid),\r
-                        name='MDS.{}'.format(mds_gid),\r
-                        meta=dp_mds.__dict__\r
-                    )\r
-                    # add mds node relationship\r
-                    cypher_cmd = CypherOP.add_link(\r
-                        host_node,\r
-                        mds_node,\r
-                        'HostContainsMds'\r
-                    )\r
-                    cluster_host = socket.gethostname()\r
-                    data.fields['agenthost'] = cluster_host\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['host'] = cluster_host\r
-                    data.fields['cmd'] = str(cypher_cmd)\r
-                    self.data.append(data)\r
-\r
-    def _osd_contains_pg(self):\r
-        cluster_id = self._cluster_id\r
-        db = ClusterAPI(self._module_inst)\r
-\r
-        pg_stats = db.get_pg_stats()\r
-        for osd_data in db.get_osds():\r
-            osd_id = osd_data['osd']\r
-            if not osd_data.get('in'):\r
-                continue\r
-            for _data in pg_stats:\r
-                state = _data.get('state')\r
-                up = _data.get('up')\r
-                acting = _data.get('acting')\r
-                pgid = _data.get('pgid')\r
-                stat_sum = _data.get('stat_sum', {})\r
-                num_objects = stat_sum.get('num_objects')\r
-                num_objects_degraded = stat_sum.get('num_objects_degraded')\r
-                num_objects_misplaced = stat_sum.get('num_objects_misplaced')\r
-                num_objects_unfound = stat_sum.get('num_objects_unfound')\r
-                if osd_id in up:\r
-                    if str(osd_id) not in self._osd_nodes:\r
-                        continue\r
-                    osd_node = self._osd_nodes[str(osd_id)]\r
-                    data = DBRelay()\r
-                    dp_pg = MGRDpPG(\r
-                        fsid=cluster_id,\r
-                        pgid=pgid,\r
-                        up_osds=','.join(str(x) for x in up),\r
-                        acting_osds=','.join(str(x) for x in acting),\r
-                        state=state,\r
-                        objects=num_objects,\r
-                        degraded=num_objects_degraded,\r
-                        misplaced=num_objects_misplaced,\r
-                        unfound=num_objects_unfound\r
-                    )\r
-\r
-                    # create pg node\r
-                    pg_node = NodeInfo(\r
-                        label='CephPG',\r
-                        domain_id='{}.pg.{}'.format(cluster_id, pgid),\r
-                        name='PG.{}'.format(pgid),\r
-                        meta=dp_pg.__dict__\r
-                    )\r
-\r
-                    # add pg node relationship\r
-                    cypher_cmd = CypherOP.add_link(\r
-                        osd_node,\r
-                        pg_node,\r
-                        'OsdContainsPg'\r
-                    )\r
-                    cluster_host = socket.gethostname()\r
-                    data.fields['agenthost'] = cluster_host\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['host'] = cluster_host\r
-                    data.fields['cmd'] = str(cypher_cmd)\r
-                    self.data.append(data)\r
-\r
-    def _osd_contains_disk(self):\r
-        cluster_id = self._cluster_id\r
-        db = ClusterAPI(self._module_inst)\r
-\r
-        osd_metadata = db.get_osd_metadata()\r
-        for osd_id in osd_metadata:\r
-            osds_smart = db.get_osd_smart(osd_id)\r
-            if not osds_smart:\r
-                continue\r
-\r
-            if str(osd_id) not in self._osd_nodes:\r
-                continue\r
-\r
-            hostname = db.get_osd_hostname(osd_id)\r
-            osd_node = self._osd_nodes[str(osd_id)]\r
-            for dev_name, s_val in osds_smart.iteritems():\r
-                data = DBRelay()\r
-                disk_domain_id = str(dev_name)\r
-                try:\r
-                    if isinstance(s_val.get('user_capacity'), dict):\r
-                        user_capacity = \\r
-                            s_val['user_capacity'].get('bytes', {}).get('n', 0)\r
-                    else:\r
-                        user_capacity = s_val.get('user_capacity', 0)\r
-                except ValueError:\r
-                    user_capacity = 0\r
-                dp_disk = MGRDpDisk(\r
-                    host_domain_id='{}_{}'.format(cluster_id, hostname),\r
-                    model=s_val.get('model_name', ''),\r
-                    size=get_human_readable(\r
-                        int(user_capacity), 0)\r
-                )\r
-\r
-                # create disk node\r
-                disk_node = NodeInfo(\r
-                    label='VMDisk',\r
-                    domain_id=disk_domain_id,\r
-                    name=dev_name,\r
-                    meta=dp_disk.__dict__\r
-                )\r
-\r
-                # add disk node relationship\r
-                cypher_cmd = CypherOP.add_link(\r
-                    osd_node,\r
-                    disk_node,\r
-                    'DiskOfOsd'\r
-                )\r
-                cluster_host = socket.gethostname()\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-                # host node and disk node relationship\r
-                data = DBRelay()\r
-                host_node = NodeInfo(\r
-                    label='VMHost',\r
-                    domain_id='{}_{}'.format(cluster_id, hostname),\r
-                    name=hostname,\r
-                    meta={}\r
-                )\r
-\r
-                # add host-disk relationship\r
-                cypher_cmd = CypherOP.add_link(\r
-                    host_node,\r
-                    disk_node,\r
-                    'VmHostContainsVmDisk'\r
-                )\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-    def _rbd_contains_pg(self):\r
-        cluster_id = self._cluster_id\r
-        db = ClusterAPI(self._module_inst)\r
-\r
-        pg_stats = db.get_pg_stats()\r
-        pools = db.get_osd_pools()\r
-        for pool_data in pools:\r
-            pool_name = pool_data.get('pool_name')\r
-            rbd_list = db.get_rbd_list(pool_name=pool_name)\r
-            for rbd_data in rbd_list:\r
-                image_name = rbd_data.get('name')\r
-                # querying rbd object info can sometimes get stuck\r
-                rbd_info = db.get_rbd_info(pool_name, image_name)\r
-                rbd_id = rbd_info.get('id')\r
-                rbd_size = rbd_info.get('size')\r
-                rbd_pgids = rbd_info.get('pgs', [])\r
-\r
-                pgids = []\r
-                for _data in rbd_pgids:\r
-                    pgid = _data.get('pgid')\r
-                    if pgid:\r
-                        pgids.append(pgid)\r
-\r
-                # RBD info\r
-                dp_rbd = MGRDpRBD(\r
-                    fsid=cluster_id,\r
-                    _id=rbd_id,\r
-                    name=image_name,\r
-                    pool_name=pool_name,\r
-                    size=rbd_size,\r
-                    pgids=','.join(pgids)\r
-                )\r
-\r
-                # create rbd node\r
-                rbd_node = NodeInfo(\r
-                    label='CephRBD',\r
-                    domain_id='{}.rbd.{}'.format(cluster_id, image_name),\r
-                    name=image_name,\r
-                    meta=dp_rbd.__dict__\r
-                )\r
-\r
-                for _data in pg_stats:\r
-                    pgid = _data.get('pgid')\r
-                    if pgid not in pgids:\r
-                        continue\r
-\r
-                    state = _data.get('state')\r
-                    up = _data.get('up')\r
-                    acting = _data.get('acting')\r
-                    stat_sum = _data.get('stat_sum', {})\r
-                    num_objects = stat_sum.get('num_objects')\r
-                    num_objects_degraded = stat_sum.get('num_objects_degraded')\r
-                    num_objects_misplaced = stat_sum.get('num_objects_misplaced')\r
-                    num_objects_unfound = stat_sum.get('num_objects_unfound')\r
-\r
-                    data = DBRelay()\r
-                    dp_pg = MGRDpPG(\r
-                        fsid=cluster_id,\r
-                        pgid=pgid,\r
-                        up_osds=','.join(str(x) for x in up),\r
-                        acting_osds=','.join(str(x) for x in acting),\r
-                        state=state,\r
-                        objects=num_objects,\r
-                        degraded=num_objects_degraded,\r
-                        misplaced=num_objects_misplaced,\r
-                        unfound=num_objects_unfound\r
-                    )\r
-\r
-                    # create pg node\r
-                    pg_node = NodeInfo(\r
-                        label='CephPG',\r
-                        domain_id='{}.pg.{}'.format(cluster_id, pgid),\r
-                        name='PG.{}'.format(pgid),\r
-                        meta=dp_pg.__dict__\r
-                    )\r
-\r
-                    # add rbd node relationship\r
-                    cypher_cmd = CypherOP.add_link(\r
-                        rbd_node,\r
-                        pg_node,\r
-                        'RbdContainsPg'\r
-                    )\r
-                    cluster_host = socket.gethostname()\r
-                    data.fields['agenthost'] = cluster_host\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['host'] = cluster_host\r
-                    data.fields['cmd'] = str(cypher_cmd)\r
-                    self.data.append(data)\r
-\r
-    def _collect_data(self):\r
-        if not self._module_inst:\r
-            return\r
-\r
-        self._cluster_contains_host()\r
-        self._host_contains_osd()\r
-        self._host_contains_mon()\r
-        self._host_contains_mds()\r
-        self._osd_contains_pg()\r
-        self._osd_contains_disk()\r
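Unlike the other metrics agents, DBRelayAgent emits topology rather than
numbers: each record's cmd field carries a Cypher statement built by
CypherOP.add_link() from the deleted common/cypher.py, which this page does
not show. Purely for orientation, such a helper could build a statement of
roughly this shape from two NodeInfo objects:

    # Illustrative only; the real CypherOP.add_link() is not shown here.
    def add_link_sketch(src, dst, relationship):
        return ("MERGE (a:%s {domain_id: '%s'}) "
                "MERGE (b:%s {domain_id: '%s'}) "
                "MERGE (a)-[:%s]->(b)"
                % (src.label, src.domain_id,
                   dst.label, dst.domain_id, relationship))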
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_agent.py
deleted file mode 100644
index 63c8e87..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-from __future__ import absolute_import
-
-import socket
-import time
-
-from . import AGENT_VERSION, MetricsAgent, MetricsField
-from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING
-from ...common.clusterdata import ClusterAPI
-
-
-class SAIAgentFields(MetricsField):
-    """ SAI DiskSmart structure """
-    measurement = 'sai_agent'
-
-    def __init__(self):
-        super(SAIAgentFields, self).__init__()
-        self.tags['agenthost_domain_id'] = None
-        self.fields['agent_type'] = str('ceph')
-        self.fields['agent_version'] = str(AGENT_VERSION)
-        self.fields['agenthost'] = ''
-        self.fields['cluster_domain_id'] = ''
-        self.fields['heartbeat_interval'] = ''
-        self.fields['host_ip'] = ''
-        self.fields['host_name'] = ''
-        self.fields['is_error'] = False
-        self.fields['is_ceph_error'] = False
-        self.fields['needs_warning'] = False
-        self.fields['send'] = None
-
-
-class SAIAgent(MetricsAgent):
-    measurement = 'sai_agent'
-
-    def _collect_data(self):
-        mgr_id = []
-        obj_api = ClusterAPI(self._module_inst)
-        svc_data = obj_api.get_server(socket.gethostname())
-        cluster_state = obj_api.get_health_status()
-        if not svc_data:
-            raise Exception('unable to get %s service info' % socket.gethostname())
-        # Filter mgr id
-        for s in svc_data.get('services', []):
-            if s.get('type', '') == 'mgr':
-                mgr_id.append(s.get('id'))
-
-        for _id in mgr_id:
-            # build a fresh record per mgr so each append is a distinct point
-            c_data = SAIAgentFields()
-            mgr_meta = obj_api.get_mgr_metadata(_id)
-            cluster_id = obj_api.get_cluster_id()
-            c_data.fields['cluster_domain_id'] = str(cluster_id)
-            c_data.fields['agenthost'] = str(socket.gethostname())
-            c_data.tags['agenthost_domain_id'] = \
-                str('%s_%s' % (cluster_id, c_data.fields['agenthost']))
-            c_data.fields['heartbeat_interval'] = \
-                int(obj_api.get_configuration('diskprediction_upload_metrics_interval'))
-            c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1'))
-            c_data.fields['host_name'] = str(socket.gethostname())
-            c_data.fields['is_error'] = bool(
-                obj_api.module.status.get('status', '') in
-                [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED])
-            if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']:
-                c_data.fields['is_ceph_error'] = True
-                c_data.fields['needs_warning'] = True
-                c_data.fields['is_error'] = True
-                c_data.fields['problems'] = str(obj_api.get_health_checks())
-            else:
-                c_data.fields['is_ceph_error'] = False
-            c_data.fields['send'] = int(time.time() * 1000)
-            self.data.append(c_data)
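
All of the metrics agents in this plugin share the same identity convention: a domain id is the cluster fsid and a hostname joined by an underscore. A quick sketch (fsid and hostname invented for illustration):

    >>> cluster_id = '0a1b2c3d-aaaa-bbbb-cccc-0123456789ab'   # hypothetical fsid
    >>> hostname = 'ceph-node1'                               # normally socket.gethostname()
    >>> '%s_%s' % (cluster_id, hostname)
    '0a1b2c3d-aaaa-bbbb-cccc-0123456789ab_ceph-node1'
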
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_cluster.py
deleted file mode 100644 (file)
index ac9cab9..0000000
+++ /dev/null
@@ -1,36 +0,0 @@
-from __future__ import absolute_import
-
-import socket
-
-from . import AGENT_VERSION, MetricsAgent, MetricsField
-from ...common.clusterdata import ClusterAPI
-
-
-class SAIClusterFields(MetricsField):
-    """ SAI Host structure """
-    measurement = 'sai_cluster'
-
-    def __init__(self):
-        super(SAIClusterFields, self).__init__()
-        self.tags['domain_id'] = None
-        self.fields['agenthost'] = None
-        self.tags['agenthost_domain_id'] = None
-        self.fields['name'] = None
-        self.fields['agent_version'] = str(AGENT_VERSION)
-
-
-class SAICluserAgent(MetricsAgent):
-    measurement = 'sai_cluster'
-
-    def _collect_data(self):
-        c_data = SAIClusterFields()
-        obj_api = ClusterAPI(self._module_inst)
-        cluster_id = obj_api.get_cluster_id()
-
-        c_data.tags['domain_id'] = str(cluster_id)
-        c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname()))
-        c_data.fields['agenthost'] = str(socket.gethostname())
-        c_data.tags['agenthost_domain_id'] = \
-            str('%s_%s' % (cluster_id, c_data.fields['agenthost']))
-        c_data.fields['name'] = 'ceph mgr plugin'
-        self.data.append(c_data)
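
Each MetricsField subclass boils down to a measurement name plus tags/fields dictionaries. Assuming an InfluxDB-style backend on the receiving end (the actual transport lives in the plugin's client code, not in this file), a collected point could be rendered roughly like this; to_line_protocol is a hypothetical helper, not part of the plugin:

    def to_line_protocol(measurement, tags, fields):
        # minimal sketch: no escaping, string field values only
        tag_str = ','.join('%s=%s' % (k, v) for k, v in sorted(tags.items()))
        field_str = ','.join('%s="%s"' % (k, v) for k, v in sorted(fields.items()))
        return '%s,%s %s' % (measurement, tag_str, field_str)

    # e.g. to_line_protocol('sai_cluster', c_data.tags, c_data.fields)
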
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_disk.py
deleted file mode 100644 (file)
index 447b24b..0000000
+++ /dev/null
@@ -1,172 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common import get_human_readable\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIDiskFields(MetricsField):\r
-    """ SAI Disk structure """\r
-    measurement = 'sai_disk'\r
-\r
-    def __init__(self):\r
-        super(SAIDiskFields, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['disk_domain_id'] = None\r
-        self.tags['disk_name'] = None\r
-        self.tags['disk_wwn'] = None\r
-        self.tags['primary_key'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['host_domain_id'] = None\r
-        self.fields['model'] = None\r
-        self.fields['serial_number'] = None\r
-        self.fields['size'] = None\r
-        self.fields['vendor'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-        """disk_status\r
-        0: unknown  1: good     2: failure\r
-        """\r
-        self.fields['disk_status'] = 0\r
-\r
-        """disk_type\r
-        0: unknown  1: HDD      2: SSD      3: SSD NVME\r
-        4: SSD SAS  5: SSD SATA 6: HDD SAS  7: HDD SATA\r
-        """\r
-        self.fields['disk_type'] = 0\r
-\r
-\r
-class SAIDiskAgent(MetricsAgent):\r
-    measurement = 'sai_disk'\r
-\r
-    @staticmethod\r
-    def _convert_disk_type(is_ssd, sata_version, protocol):\r
-        """ return type:\r
-            0: "Unknown', 1: 'HDD',\r
-            2: 'SSD",     3: "SSD NVME",\r
-            4: "SSD SAS", 5: "SSD SATA",\r
-            6: "HDD SAS", 7: "HDD SATA"\r
-        """\r
-        if is_ssd:\r
-            if sata_version and not protocol:\r
-                disk_type = 5\r
-            elif 'SCSI'.lower() in protocol.lower():\r
-                disk_type = 4\r
-            elif 'NVMe'.lower() in protocol.lower():\r
-                disk_type = 3\r
-            else:\r
-                disk_type = 2\r
-        else:\r
-            if sata_version and not protocol:\r
-                disk_type = 7\r
-            elif 'SCSI'.lower() in protocol.lower():\r
-                disk_type = 6\r
-            else:\r
-                disk_type = 1\r
-        return disk_type\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-        osds = obj_api.get_osds()\r
-        for osd in osds:\r
-            if osd.get('osd') is None:\r
-                continue\r
-            if not osd.get('in'):\r
-                continue\r
-            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
-            if not osds_meta:\r
-                continue\r
-            osds_smart = obj_api.get_osd_smart(osd.get('osd'))\r
-            if not osds_smart:\r
-                continue\r
-            for dev_name, s_val in osds_smart.items():\r
-                d_data = SAIDiskFields()\r
-                d_data.tags['disk_name'] = str(dev_name)\r
-                d_data.fields['cluster_domain_id'] = str(cluster_id)\r
-                d_data.tags['host_domain_id'] = \\r
-                    str('%s_%s'\r
-                        % (cluster_id, osds_meta.get('hostname', 'None')))\r
-                d_data.fields['agenthost'] = str(socket.gethostname())\r
-                d_data.tags['agenthost_domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, d_data.fields['agenthost']))\r
-                serial_number = s_val.get('serial_number')\r
-                wwn = s_val.get('wwn', {})\r
-                wwpn = ''\r
-                if wwn:\r
-                    wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
-                    for k in wwn.keys():\r
-                        if k in ['naa', 't10', 'eui', 'iqn']:\r
-                            wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
-                            break\r
-\r
-                if wwpn:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.tags['disk_wwn'] = str(wwpn)\r
-                    if serial_number:\r
-                        d_data.fields['serial_number'] = str(serial_number)\r
-                    else:\r
-                        d_data.fields['serial_number'] = str(wwpn)\r
-                elif serial_number:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.fields['serial_number'] = str(serial_number)\r
-                    # wwpn is empty in this branch; fall back to the serial\r
-                    d_data.tags['disk_wwn'] = str(serial_number)\r
-                else:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.tags['disk_wwn'] = str(dev_name)\r
-                    d_data.fields['serial_number'] = str(dev_name)\r
-                d_data.tags['primary_key'] = \\r
-                    str('%s%s%s'\r
-                        % (cluster_id, d_data.tags['host_domain_id'],\r
-                           d_data.tags['disk_domain_id']))\r
-                d_data.fields['disk_status'] = 1\r
-                is_ssd = s_val.get('rotation_rate') == 0\r
-                vendor = s_val.get('vendor', None)\r
-                model = s_val.get('model_name', None)\r
-                if s_val.get('sata_version', {}).get('string'):\r
-                    sata_version = s_val['sata_version']['string']\r
-                else:\r
-                    sata_version = ''\r
-                if s_val.get('device', {}).get('protocol'):\r
-                    protocol = s_val['device']['protocol']\r
-                else:\r
-                    protocol = ''\r
-                d_data.fields['disk_type'] = \\r
-                    self._convert_disk_type(is_ssd, sata_version, protocol)\r
-                d_data.fields['firmware_version'] = \\r
-                    str(s_val.get('firmware_version'))\r
-                if model:\r
-                    d_data.fields['model'] = str(model)\r
-                if vendor:\r
-                    d_data.fields['vendor'] = str(vendor)\r
-                if sata_version:\r
-                    d_data.fields['sata_version'] = str(sata_version)\r
-                if s_val.get('logical_block_size'):\r
-                    d_data.fields['sector_size'] = \\r
-                        str(s_val['logical_block_size'])\r
-                d_data.fields['transport_protocol'] = ''\r
-                # prefer model_family as the vendor string when SMART reports it\r
-                if s_val.get('model_family'):\r
-                    d_data.fields['vendor'] = \\r
-                        str(s_val['model_family']).replace('\"', '\'')\r
-                try:\r
-                    if isinstance(s_val.get('user_capacity'), dict):\r
-                        user_capacity = \\r
-                            s_val['user_capacity'].get('bytes', {}).get('n', 0)\r
-                    else:\r
-                        user_capacity = s_val.get('user_capacity', 0)\r
-                    user_capacity = int(user_capacity)\r
-                except ValueError:\r
-                    user_capacity = 0\r
-                d_data.fields['size'] = \\r
-                    get_human_readable(user_capacity, 0)\r
-\r
-                if s_val.get('smart_status', {}).get('passed'):\r
-                    d_data.fields['smart_health_status'] = 'PASSED'\r
-                else:\r
-                    d_data.fields['smart_health_status'] = 'FAILED'\r
-                self.data.append(d_data)\r
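
The WWN handling above reassembles a flat worldwide name from smartctl's structured `wwn` object: the OUI and vendor id are hex-concatenated first, then prefixed with the NAA (or t10/eui/iqn) value. A worked example with made-up identifiers:

    >>> wwn = {'naa': 5, 'oui': 0x0004cf, 'id': 0x8e2b1a7d}   # illustrative values
    >>> wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))
    >>> wwpn
    '0004CF8E2B1A7D'
    >>> ('%X%s' % (wwn['naa'], wwpn)).lower()
    '50004cf8e2b1a7d'
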
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_disk_smart.py
deleted file mode 100644 (file)
index 509d926..0000000
+++ /dev/null
@@ -1,156 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import datetime\r
-import json\r
-# imported for its side effect: pre-loads strptime internals so that\r
-# threaded calls to datetime.strptime do not race on first import\r
-import _strptime\r
-import socket\r
-import time\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIDiskSmartFields(MetricsField):\r
-    """ SAI DiskSmart structure """\r
-    measurement = 'sai_disk_smart'\r
-\r
-    def __init__(self):\r
-        super(SAIDiskSmartFields, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['disk_domain_id'] = None\r
-        self.tags['disk_name'] = None\r
-        self.tags['disk_wwn'] = None\r
-        self.tags['primary_key'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['host_domain_id'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-\r
-class SAIDiskSmartAgent(MetricsAgent):\r
-    measurement = 'sai_disk_smart'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-        osds = obj_api.get_osds()\r
-        for osd in osds:\r
-            if osd.get('osd') is None:\r
-                continue\r
-            if not osd.get('in'):\r
-                continue\r
-            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
-            if not osds_meta:\r
-                continue\r
-            devs_info = obj_api.get_osd_device_id(osd.get('osd'))\r
-            if devs_info:\r
-                for dev_name, dev_info in devs_info.items():\r
-                    osds_smart = obj_api.get_device_health(dev_info['dev_id'])\r
-                    if not osds_smart:\r
-                        continue\r
-                    # always pass through the most recent smart data record\r
-                    o_key = sorted(osds_smart.keys(), reverse=True)[0]\r
-                    if o_key:\r
-                        s_date = o_key\r
-                        s_val = osds_smart[s_date]\r
-                        smart_data = SAIDiskSmartFields()\r
-                        smart_data.tags['disk_name'] = str(dev_name)\r
-                        smart_data.fields['cluster_domain_id'] = str(cluster_id)\r
-                        smart_data.tags['host_domain_id'] = \\r
-                            str('%s_%s'\r
-                                % (cluster_id, osds_meta.get('hostname', 'None')))\r
-                        smart_data.fields['agenthost'] = str(socket.gethostname())\r
-                        smart_data.tags['agenthost_domain_id'] = \\r
-                            str('%s_%s' % (cluster_id, smart_data.fields['agenthost']))\r
-                        # parse attributes\r
-                        ata_smart = s_val.get('ata_smart_attributes', {})\r
-                        for attr in ata_smart.get('table', []):\r
-                            if attr.get('raw', {}).get('string'):\r
-                                if str(attr.get('raw', {}).get('string', '0')).isdigit():\r
-                                    smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                        int(attr.get('raw', {}).get('string', '0'))\r
-                                else:\r
-                                    if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():\r
-                                        smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                            int(attr.get('raw', {}).get('string', '0').split(' ')[0])\r
-                                    else:\r
-                                        smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                            attr.get('raw', {}).get('value', 0)\r
-                        smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'"))\r
-                        if s_val.get('temperature', {}).get('current') is not None:\r
-                            smart_data.fields['CurrentDriveTemperature_raw'] = \\r
-                                int(s_val['temperature']['current'])\r
-                        if s_val.get('temperature', {}).get('drive_trip') is not None:\r
-                            smart_data.fields['DriveTripTemperature_raw'] = \\r
-                                int(s_val['temperature']['drive_trip'])\r
-                        if s_val.get('elements_grown_list') is not None:\r
-                            smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list'])\r
-                        if s_val.get('power_on_time', {}).get('hours') is not None:\r
-                            smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours'])\r
-                        if s_val.get('scsi_percentage_used_endurance_indicator') is not None:\r
-                            smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \\r
-                                int(s_val['scsi_percentage_used_endurance_indicator'])\r
-                        if s_val.get('scsi_error_counter_log') is not None:\r
-                            s_err_counter = s_val['scsi_error_counter_log']\r
-                            for s_key in s_err_counter.keys():\r
-                                if s_key.lower() in ['read', 'write']:\r
-                                    for s1_key in s_err_counter[s_key].keys():\r
-                                        if s1_key.lower() == 'errors_corrected_by_eccfast':\r
-                                            smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['errors_corrected_by_eccfast'])\r
-                                        elif s1_key.lower() == 'errors_corrected_by_eccdelayed':\r
-                                            smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['errors_corrected_by_eccdelayed'])\r
-                                        elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites':\r
-                                            smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites'])\r
-                                        elif s1_key.lower() == 'total_errors_corrected':\r
-                                            smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['total_errors_corrected'])\r
-                                        elif s1_key.lower() == 'correction_algorithm_invocations':\r
-                                            smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['correction_algorithm_invocations'])\r
-                                        elif s1_key.lower() == 'gigabytes_processed':\r
-                                            smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \\r
-                                                float(s_err_counter[s_key]['gigabytes_processed'])\r
-                                        elif s1_key.lower() == 'total_uncorrected_errors':\r
-                                            smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \\r
-                                                int(s_err_counter[s_key]['total_uncorrected_errors'])\r
-\r
-                        serial_number = s_val.get('serial_number')\r
-                        wwn = s_val.get('wwn', {})\r
-                        wwpn = ''\r
-                        if wwn:\r
-                            wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
-                            for k in wwn.keys():\r
-                                if k in ['naa', 't10', 'eui', 'iqn']:\r
-                                    wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
-                                    break\r
-                        if wwpn:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.tags['disk_wwn'] = str(wwpn)\r
-                            if serial_number:\r
-                                smart_data.fields['serial_number'] = str(serial_number)\r
-                            else:\r
-                                smart_data.fields['serial_number'] = str(wwpn)\r
-                        elif serial_number:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.fields['serial_number'] = str(serial_number)\r
-                            # wwpn is empty in this branch; fall back to the serial\r
-                            smart_data.tags['disk_wwn'] = str(serial_number)\r
-                        else:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.tags['disk_wwn'] = str(dev_name)\r
-                            smart_data.fields['serial_number'] = str(dev_name)\r
-                        smart_data.tags['primary_key'] = \\r
-                            str('%s%s%s'\r
-                                % (cluster_id,\r
-                                   smart_data.tags['host_domain_id'],\r
-                                   smart_data.tags['disk_domain_id']))\r
-                        smart_data.timestamp = \\r
-                            time.mktime(datetime.datetime.strptime(\r
-                                s_date, '%Y%m%d-%H%M%S').timetuple())\r
-                        self.data.append(smart_data)\r
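
The attribute loop above has to cope with smartctl raw strings that are either a bare integer or an integer followed by annotations; anything else falls back to the attribute's numeric `raw.value`. For example:

    >>> raw = '33 (Min/Max 21/45)'   # a typical temperature raw string
    >>> raw.isdigit()
    False
    >>> int(raw.split(' ')[0])
    33
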
diff --git a/src/pybind/mgr/diskprediction/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction/agent/metrics/sai_host.py
deleted file mode 100644 (file)
index f3fc8ba..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIHostFields(MetricsField):\r
-    """ SAI Host structure """\r
-    measurement = 'sai_host'\r
-\r
-    def __init__(self):\r
-        super(SAIHostFields, self).__init__()\r
-        self.tags['domain_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['name'] = None\r
-        self.fields['host_ip'] = None\r
-        self.fields['host_ipv6'] = None\r
-        self.fields['host_uuid'] = None\r
-        self.fields['os_type'] = str('ceph')\r
-        self.fields['os_name'] = None\r
-        self.fields['os_version'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-\r
-class SAIHostAgent(MetricsAgent):\r
-    measurement = 'sai_host'\r
-\r
-    def _collect_data(self):\r
-        db = ClusterAPI(self._module_inst)\r
-        cluster_id = db.get_cluster_id()\r
-\r
-        hosts = set()\r
-\r
-        osd_data = db.get_osds()\r
-        for _data in osd_data:\r
-            osd_id = _data['osd']\r
-            if not _data.get('in'):\r
-                continue\r
-            osd_addr = _data['public_addr'].split(':')[0]\r
-            osd_metadata = db.get_osd_metadata(osd_id)\r
-            if osd_metadata:\r
-                osd_host = osd_metadata.get('hostname', 'None')\r
-                if osd_host not in hosts:\r
-                    data = SAIHostFields()\r
-                    data.fields['agenthost'] = str(socket.gethostname())\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, osd_host))\r
-                    data.fields['cluster_domain_id'] = str(cluster_id)\r
-                    data.fields['host_ip'] = osd_addr\r
-                    data.fields['host_uuid'] = \\r
-                        str('%s_%s' % (cluster_id, osd_host))\r
-                    data.fields['os_name'] = \\r
-                        osd_metadata.get('ceph_release', '')\r
-                    data.fields['os_version'] = \\r
-                        osd_metadata.get('ceph_version_short', '')\r
-                    data.fields['name'] = osd_host\r
-                    hosts.add(osd_host)\r
-                    self.data.append(data)\r
-\r
-        mons = db.get_mons()\r
-        for _data in mons:\r
-            mon_host = _data['name']\r
-            mon_addr = _data['public_addr'].split(':')[0]\r
-            if mon_host not in hosts:\r
-                data = SAIHostFields()\r
-                data.fields['agenthost'] = str(socket.gethostname())\r
-                data.tags['agenthost_domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                data.tags['domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, mon_host))\r
-                data.fields['cluster_domain_id'] = str(cluster_id)\r
-                data.fields['host_ip'] = mon_addr\r
-                data.fields['host_uuid'] = \\r
-                    str('%s_%s' % (cluster_id, mon_host))\r
-                data.fields['name'] = mon_host\r
-                hosts.add(mon_host)\r
-                self.data.append(data)\r
-\r
-        file_systems = db.get_file_systems()\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap').get('info')\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                if mds_host not in hosts:\r
-                    data = SAIHostFields()\r
-                    data.fields['agenthost'] = str(socket.gethostname())\r
-                    data.tags['agenthost_domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
-                    data.tags['domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, mds_host))\r
-                    data.fields['cluster_domain_id'] = str(cluster_id)\r
-                    data.fields['host_ip'] = mds_addr\r
-                    data.fields['host_uuid'] = \\r
-                        str('%s_%s' % (cluster_id, mds_host))\r
-                    data.fields['name'] = mds_host\r
-                    hosts.add(mds_host)\r
-                    self.data.append(data)\r
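
Note that host IPs are extracted above by splitting `public_addr` on the first colon, which assumes the legacy `ip:port/nonce` form; a bracketed IPv6 address would need a smarter parse, e.g. (sketch, not part of the plugin):

    >>> addr = '192.168.0.10:6789/0'
    >>> addr.split(':')[0]
    '192.168.0.10'
    >>> v6 = '[2001:db8::1]:6789/0'   # would defeat the simple split
    >>> v6[1:v6.index(']')] if v6.startswith('[') else v6.split(':')[0]
    '2001:db8::1'
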
diff --git a/src/pybind/mgr/diskprediction/agent/predict/__init__.py b/src/pybind/mgr/diskprediction/agent/predict/__init__.py
deleted file mode 100644 (file)
index 28e2eeb..0000000
+++ /dev/null
@@ -1 +0,0 @@
-from __future__ import absolute_import\r
diff --git a/src/pybind/mgr/diskprediction/agent/predict/prediction.py b/src/pybind/mgr/diskprediction/agent/predict/prediction.py
deleted file mode 100644 (file)
index 27ba333..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
-from __future__ import absolute_import\r
-import datetime\r
-\r
-from .. import BaseAgent\r
-from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-PREDICTION_FILE = '/var/tmp/disk_prediction.json'\r
-\r
-TIME_DAYS = 24*60*60\r
-TIME_WEEK = TIME_DAYS * 7\r
-\r
-\r
-class PredictionAgent(BaseAgent):\r
-\r
-    measurement = 'sai_disk_prediction'\r
-\r
-    @staticmethod\r
-    def _get_disk_type(is_ssd, vendor, model):\r
-        """ return type:\r
-            0: "Unknown", 1: "HDD",\r
-            2: "SSD",     3: "SSD NVME",\r
-            4: "SSD SAS", 5: "SSD SATA",\r
-            6: "HDD SAS", 7: "HDD SATA"\r
-        """\r
-        if is_ssd:\r
-            if vendor:\r
-                disk_type = 4\r
-            elif model:\r
-                disk_type = 5\r
-            else:\r
-                disk_type = 2\r
-        else:\r
-            if vendor:\r
-                disk_type = 6\r
-            elif model:\r
-                disk_type = 7\r
-            else:\r
-                disk_type = 1\r
-        return disk_type\r
-\r
-    def _store_prediction_result(self, result):\r
-        self._module_inst._prediction_result = result\r
-\r
-    def _parse_prediction_data(self, host_domain_id, disk_domain_id):\r
-        result = {}\r
-        try:\r
-            query_info = self._client.query_info(\r
-                host_domain_id, disk_domain_id, 'sai_disk_prediction')\r
-            status_code = query_info.status_code\r
-            if status_code == 200:\r
-                result = query_info.json()\r
-                self._module_inst.status = {'status': DP_MGR_STAT_OK}\r
-            else:\r
-                resp = query_info.json()\r
-                if resp.get('error'):\r
-                    self._logger.error(str(resp['error']))\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_FAILED,\r
-                         'reason': 'failed to parse device {} prediction data'.format(disk_domain_id)}\r
-        except Exception as e:\r
-            self._logger.error(str(e))\r
-        return result\r
-\r
-    @staticmethod\r
-    def _convert_timestamp(predicted_timestamp, life_expectancy_day):\r
-        """\r
-\r
-        :param predicted_timestamp: unit is nanoseconds\r
-        :param life_expectancy_day: unit is seconds\r
-        :return:\r
-            date format '%Y-%m-%d' ex. 2018-01-01\r
-        """\r
-        return datetime.datetime.fromtimestamp(\r
-            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')\r
-\r
-    def _fetch_prediction_result(self):\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-\r
-        result = {}\r
-        osds = obj_api.get_osds()\r
-        for osd in osds:\r
-            osd_id = osd.get('osd')\r
-            if osd_id is None:\r
-                continue\r
-            if not osd.get('in'):\r
-                continue\r
-            osds_meta = obj_api.get_osd_metadata(osd_id)\r
-            if not osds_meta:\r
-                continue\r
-            osds_smart = obj_api.get_osd_smart(osd_id)\r
-            if not osds_smart:\r
-                continue\r
-\r
-            hostname = osds_meta.get('hostname', 'None')\r
-            host_domain_id = '%s_%s' % (cluster_id, hostname)\r
-\r
-            for dev_name, s_val in osds_smart.items():\r
-                is_ssd = s_val.get('rotation_rate') == 0\r
-                vendor = s_val.get('vendor', '')\r
-                model = s_val.get('model_name', '')\r
-                disk_type = self._get_disk_type(is_ssd, vendor, model)\r
-                serial_number = s_val.get('serial_number')\r
-                wwn = s_val.get('wwn', {})\r
-                wwpn = ''\r
-                if wwn:\r
-                    wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
-                    for k in wwn.keys():\r
-                        if k in ['naa', 't10', 'eui', 'iqn']:\r
-                            wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
-                            break\r
-\r
-                tmp = {}\r
-                if wwpn:\r
-                    tmp['disk_domain_id'] = dev_name\r
-                    tmp['disk_wwn'] = wwpn\r
-                    if serial_number:\r
-                        tmp['serial_number'] = serial_number\r
-                    else:\r
-                        tmp['serial_number'] = wwpn\r
-                elif serial_number:\r
-                    tmp['disk_domain_id'] = dev_name\r
-                    tmp['serial_number'] = serial_number\r
-                    # wwpn is empty in this branch; fall back to the serial\r
-                    tmp['disk_wwn'] = serial_number\r
-                else:\r
-                    tmp['disk_domain_id'] = dev_name\r
-                    tmp['disk_wwn'] = dev_name\r
-                    tmp['serial_number'] = dev_name\r
-\r
-                if s_val.get('smart_status', {}).get('passed'):\r
-                    tmp['smart_health_status'] = 'PASSED'\r
-                else:\r
-                    tmp['smart_health_status'] = 'FAILED'\r
-\r
-                tmp['sata_version'] = s_val.get('sata_version', {}).get('string', '')\r
-                tmp['sector_size'] = str(s_val.get('logical_block_size', ''))\r
-                try:\r
-                    if isinstance(s_val.get('user_capacity'), dict):\r
-                        user_capacity = \\r
-                            s_val['user_capacity'].get('bytes', {}).get('n', 0)\r
-                    else:\r
-                        user_capacity = s_val.get('user_capacity', 0)\r
-                except ValueError:\r
-                    user_capacity = 0\r
-                disk_info = {\r
-                    'disk_name': dev_name,\r
-                    'disk_type': str(disk_type),\r
-                    'disk_status': '1',\r
-                    'disk_wwn': tmp['disk_wwn'],\r
-                    'dp_disk_idd': tmp['disk_domain_id'],\r
-                    'serial_number': tmp['serial_number'],\r
-                    'vendor': vendor,\r
-                    'sata_version': tmp['sata_version'],\r
-                    'smart_healthStatus': tmp['smart_health_status'],\r
-                    'sector_size': tmp['sector_size'],\r
-                    'size': str(user_capacity),\r
-                    'prediction': self._parse_prediction_data(\r
-                        host_domain_id, tmp['disk_domain_id'])\r
-                }\r
-                # Update osd life-expectancy\r
-                predicted = None\r
-                life_expectancy_day_min = None\r
-                life_expectancy_day_max = None\r
-                devs_info = obj_api.get_osd_device_id(osd_id)\r
-                if disk_info.get('prediction', {}).get('predicted'):\r
-                    predicted = int(disk_info['prediction']['predicted'])\r
-                if disk_info.get('prediction', {}).get('near_failure'):\r
-                    if disk_info['prediction']['near_failure'].lower() == 'good':\r
-                        life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS\r
-                        life_expectancy_day_max = None\r
-                    elif disk_info['prediction']['near_failure'].lower() == 'warning':\r
-                        life_expectancy_day_min = (TIME_WEEK * 2)\r
-                        life_expectancy_day_max = (TIME_WEEK * 6)\r
-                    elif disk_info['prediction']['near_failure'].lower() == 'bad':\r
-                        life_expectancy_day_min = 0\r
-                        life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS\r
-                    else:\r
-                        # Near failure state is unknown.\r
-                        predicted = None\r
-                        life_expectancy_day_min = None\r
-                        life_expectancy_day_max = None\r
-\r
-                # life_expectancy_day_min is legitimately 0 for 'bad' disks,\r
-                # so test against None rather than truthiness\r
-                if predicted and tmp['disk_domain_id'] and life_expectancy_day_min is not None:\r
-                    from_date = None\r
-                    to_date = None\r
-                    try:\r
-                        from_date = self._convert_timestamp(predicted, life_expectancy_day_min)\r
-\r
-                        if life_expectancy_day_max is not None:\r
-                            to_date = self._convert_timestamp(predicted, life_expectancy_day_max)\r
-\r
-                        obj_api.set_device_life_expectancy(tmp['disk_domain_id'], from_date, to_date)\r
-                        self._logger.info(\r
-                            'succeed to set device {} life expectancy from: {}, to: {}'.format(\r
-                                tmp['disk_domain_id'], from_date, to_date))\r
-                    except Exception as e:\r
-                        self._logger.error(\r
-                            'failed to set device {} life expectancy from: {}, to: {}, {}'.format(\r
-                                tmp['disk_domain_id'], from_date, to_date, str(e)))\r
-                else:\r
-                    if tmp['disk_domain_id']:\r
-                        obj_api.reset_device_life_expectancy(tmp['disk_domain_id'])\r
-                if tmp['disk_domain_id']:\r
-                    result[tmp['disk_domain_id']] = disk_info\r
-\r
-        return result\r
-\r
-    def run(self):\r
-        result = self._fetch_prediction_result()\r
-        if result:\r
-            self._store_prediction_result(result)\r
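
The life-expectancy window above is computed by adding a seconds offset to the nanosecond prediction timestamp and formatting the sum as a date. A worked example (timestamp invented; output assumes a UTC host):

    >>> import datetime
    >>> TIME_DAYS = 24 * 60 * 60
    >>> TIME_WEEK = TIME_DAYS * 7
    >>> predicted = 1541600000 * (1000 ** 3)   # hypothetical prediction, in ns
    >>> datetime.datetime.fromtimestamp(
    ...     predicted / (1000 ** 3) + (TIME_WEEK * 6) + TIME_DAYS).strftime('%Y-%m-%d')
    '2018-12-20'
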
diff --git a/src/pybind/mgr/diskprediction/common/__init__.py b/src/pybind/mgr/diskprediction/common/__init__.py
deleted file mode 100644 (file)
index cbc3c30..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-from __future__ import absolute_import\r
-import errno\r
-from functools import wraps\r
-from six.moves.http_client import BAD_REQUEST\r
-import os\r
-import signal\r
-\r
-\r
-DP_MGR_STAT_OK = 'OK'\r
-DP_MGR_STAT_WARNING = 'WARNING'\r
-DP_MGR_STAT_FAILED = 'FAILED'\r
-DP_MGR_STAT_DISABLED = 'DISABLED'\r
-DP_MGR_STAT_ENABLED = 'ENABLED'\r
-\r
-\r
-class DummyResonse:\r
-    def __init__(self):\r
-        self.resp_json = dict()\r
-        self.content = 'DummyResponse'\r
-        self.status_code = BAD_REQUEST\r
-\r
-    def json(self):\r
-        return self.resp_json\r
-\r
-\r
-class TimeoutError(Exception):\r
-    pass\r
-\r
-\r
-def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\r
-    def decorator(func):\r
-        def _handle_timeout(signum, frame):\r
-            raise TimeoutError(error_message)\r
-\r
-        def wrapper(*args, **kwargs):\r
-            # prefer the instance's own _timeout attribute when one exists\r
-            wait = seconds\r
-            if args and hasattr(args[0], '_timeout'):\r
-                wait = args[0]._timeout\r
-            signal.signal(signal.SIGALRM, _handle_timeout)\r
-            signal.alarm(wait)\r
-            try:\r
-                result = func(*args, **kwargs)\r
-            finally:\r
-                signal.alarm(0)\r
-            return result\r
-\r
-        return wraps(func)(wrapper)\r
-\r
-    return decorator\r
-\r
-\r
-def get_human_readable(size, precision=2):\r
-    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']\r
-    suffix_index = 0\r
-    while size > 1000 and suffix_index < 4:\r
-        # increment the index of the suffix\r
-        suffix_index += 1\r
-        # apply the division\r
-        size = size/1000.0\r
-    return '%.*f %s' % (precision, size, suffixes[suffix_index])\r
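
A sketch of how these helpers are meant to be used together; Poller is hypothetical, and the decorator relies on SIGALRM, so it only works on the main thread:

    class Poller(object):
        _timeout = 5                # overrides the decorator's default wait

        @timeout(seconds=10)
        def fetch(self):
            pass                    # stands in for a call that might hang

    print(get_human_readable(4000787030016, 0))   # -> '4 TB'
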
diff --git a/src/pybind/mgr/diskprediction/common/client_pb2.py b/src/pybind/mgr/diskprediction/common/client_pb2.py
deleted file mode 100644 (file)
index 9f65c73..0000000
+++ /dev/null
@@ -1,1775 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: mainServer.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='mainServer.proto',
-  package='proto',
-  syntax='proto3',
-  serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 \x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 .proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a .proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3')
-  ,
-  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
-
-
-
-_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
-  name='PhoneType',
-  full_name='proto.Person.PhoneType',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='MOBILE', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='HOME', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='WORK', index=2, number=2,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=579,
-  serialized_end=622,
-)
-_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE)
-
-
-_EMPTY = _descriptor.Descriptor(
-  name='Empty',
-  full_name='proto.Empty',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=57,
-  serialized_end=64,
-)
-
-
-_GENERALMSGOUTPUT = _descriptor.Descriptor(
-  name='GeneralMsgOutput',
-  full_name='proto.GeneralMsgOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.GeneralMsgOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=66,
-  serialized_end=101,
-)
-
-
-_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='GeneralHeartbeatOutput',
-  full_name='proto.GeneralHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=103,
-  serialized_end=144,
-)
-
-
-_PINGOUTOUT = _descriptor.Descriptor(
-  name='PingOutout',
-  full_name='proto.PingOutout',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.PingOutout.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=146,
-  serialized_end=175,
-)
-
-
-_TESTINPUT = _descriptor.Descriptor(
-  name='TestInput',
-  full_name='proto.TestInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='people', full_name='proto.TestInput.people', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=177,
-  serialized_end=219,
-)
-
-
-_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor(
-  name='MapValueEntry',
-  full_name='proto.TestOutput.MapValueEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=365,
-  serialized_end=412,
-)
-
-_TESTOUTPUT = _descriptor.Descriptor(
-  name='TestOutput',
-  full_name='proto.TestOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='strArray', full_name='proto.TestOutput.strArray', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='mapValue', full_name='proto.TestOutput.mapValue', index=1,
-      number=2, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='pn', full_name='proto.TestOutput.pn', index=2,
-      number=4, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='profile', full_name='proto.TestOutput.profile', index=3,
-      number=3, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=222,
-  serialized_end=412,
-)
-
-
-_PERSON_PHONENUMBER = _descriptor.Descriptor(
-  name='PhoneNumber',
-  full_name='proto.Person.PhoneNumber',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='number', full_name='proto.Person.PhoneNumber.number', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='type', full_name='proto.Person.PhoneNumber.type', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=509,
-  serialized_end=577,
-)
-
-_PERSON = _descriptor.Descriptor(
-  name='Person',
-  full_name='proto.Person',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='proto.Person.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='id', full_name='proto.Person.id', index=1,
-      number=2, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.Person.email', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phones', full_name='proto.Person.phones', index=3,
-      number=4, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_PERSON_PHONENUMBER, ],
-  enum_types=[
-    _PERSON_PHONETYPE,
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=415,
-  serialized_end=622,
-)
-
-
-_PROFILE_FILE = _descriptor.Descriptor(
-  name='File',
-  full_name='proto.Profile.File',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='proto.Profile.File.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1,
-      number=2, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3,
-      number=4, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4,
-      number=5, type=1, cpp_type=5, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5,
-      number=6, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=675,
-  serialized_end=794,
-)
-
-_PROFILE = _descriptor.Descriptor(
-  name='Profile',
-  full_name='proto.Profile',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='fileInfo', full_name='proto.Profile.fileInfo', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_PROFILE_FILE, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=625,
-  serialized_end=794,
-)
-
-
-_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor(
-  name='GetUsersByStatusInput',
-  full_name='proto.GetUsersByStatusInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.GetUsersByStatusInput.status', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.GetUsersByStatusInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=796,
-  serialized_end=848,
-)
-
-
-_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor(
-  name='GetUsersByStatusOutput',
-  full_name='proto.GetUsersByStatusOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='users', full_name='proto.GetUsersByStatusOutput.users', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=850,
-  serialized_end=908,
-)
-
-
-_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='AccountHeartbeatOutput',
-  full_name='proto.AccountHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.AccountHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=910,
-  serialized_end=951,
-)
-
-
-_LOGININPUT = _descriptor.Descriptor(
-  name='LoginInput',
-  full_name='proto.LoginInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.LoginInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='password', full_name='proto.LoginInput.password', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=953,
-  serialized_end=998,
-)
-
-
-_USEROUTPUT = _descriptor.Descriptor(
-  name='UserOutput',
-  full_name='proto.UserOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='id', full_name='proto.UserOutput.id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.UserOutput.email', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.UserOutput.status', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phone', full_name='proto.UserOutput.phone', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='firstName', full_name='proto.UserOutput.firstName', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='lastName', full_name='proto.UserOutput.lastName', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='createdTime', full_name='proto.UserOutput.createdTime', index=6,
-      number=7, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='namespace', full_name='proto.UserOutput.namespace', index=7,
-      number=8, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='domainName', full_name='proto.UserOutput.domainName', index=8,
-      number=9, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='company', full_name='proto.UserOutput.company', index=9,
-      number=10, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='url', full_name='proto.UserOutput.url', index=10,
-      number=11, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11,
-      number=12, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12,
-      number=13, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1001,
-  serialized_end=1243,
-)
-
-
-_SINGUPINPUT = _descriptor.Descriptor(
-  name='SingupInput',
-  full_name='proto.SingupInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.SingupInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phone', full_name='proto.SingupInput.phone', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='firstName', full_name='proto.SingupInput.firstName', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='lastName', full_name='proto.SingupInput.lastName', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='password', full_name='proto.SingupInput.password', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='company', full_name='proto.SingupInput.company', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1245,
-  serialized_end=1360,
-)
-
-
-_SINGUPOUTPUT = _descriptor.Descriptor(
-  name='SingupOutput',
-  full_name='proto.SingupOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.SingupOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1362,
-  serialized_end=1393,
-)
-
-
-_DELETEUSERINPUT = _descriptor.Descriptor(
-  name='DeleteUserInput',
-  full_name='proto.DeleteUserInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.DeleteUserInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.DeleteUserInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1395,
-  serialized_end=1440,
-)
-
-
-_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor(
-  name='UpdateUserStatusInput',
-  full_name='proto.UpdateUserStatusInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.UpdateUserStatusInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.UpdateUserStatusInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.UpdateUserStatusInput.status', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1442,
-  serialized_end=1509,
-)
-
-
-_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor(
-  name='ResendConfirmCodeInput',
-  full_name='proto.ResendConfirmCodeInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.ResendConfirmCodeInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1511,
-  serialized_end=1550,
-)
-
-
-_CONFIRMINPUT = _descriptor.Descriptor(
-  name='ConfirmInput',
-  full_name='proto.ConfirmInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.ConfirmInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='code', full_name='proto.ConfirmInput.code', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1552,
-  serialized_end=1595,
-)
-
-
-_DPHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='DPHeartbeatOutput',
-  full_name='proto.DPHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.DPHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1597,
-  serialized_end=1633,
-)
-
-
-_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor(
-  name='DPGetPhysicalDisksInput',
-  full_name='proto.DPGetPhysicalDisksInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3,
-      number=4, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1635,
-  serialized_end=1745,
-)
-
-
-_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor(
-  name='DPGetDisksPredictionInput',
-  full_name='proto.DPGetDisksPredictionInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3,
-      number=4, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1747,
-  serialized_end=1870,
-)
-
-
-_DPBINARYOUTPUT = _descriptor.Descriptor(
-  name='DPBinaryOutput',
-  full_name='proto.DPBinaryOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='data', full_name='proto.DPBinaryOutput.data', index=0,
-      number=1, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1872,
-  serialized_end=1902,
-)
-
-
-_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='CollectionHeartbeatOutput',
-  full_name='proto.CollectionHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1904,
-  serialized_end=1948,
-)
-
-
-_POSTMETRICSINPUT = _descriptor.Descriptor(
-  name='PostMetricsInput',
-  full_name='proto.PostMetricsInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='points', full_name='proto.PostMetricsInput.points', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1950,
-  serialized_end=1984,
-)
-
-
-_POSTDBRELAYINPUT = _descriptor.Descriptor(
-  name='PostDBRelayInput',
-  full_name='proto.PostDBRelayInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1986,
-  serialized_end=2018,
-)
-
-
-_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor(
-  name='CollectionMessageOutput',
-  full_name='proto.CollectionMessageOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.CollectionMessageOutput.status', index=0,
-      number=1, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.CollectionMessageOutput.message', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2020,
-  serialized_end=2078,
-)
-
-_TESTINPUT.fields_by_name['people'].message_type = _PERSON
-_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT
-_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY
-_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON
-_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE
-_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
-_PERSON_PHONENUMBER.containing_type = _PERSON
-_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER
-_PERSON_PHONETYPE.containing_type = _PERSON
-_PROFILE_FILE.containing_type = _PROFILE
-_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE
-_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT
-DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
-DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT
-DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT
-DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT
-DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT
-DESCRIPTOR.message_types_by_name['Person'] = _PERSON
-DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
-DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT
-DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT
-DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT
-DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT
-DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT
-DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT
-DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT
-DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT
-DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT
-DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT
-DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT
-DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT
-DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT
-DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT
-DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT
-DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
-  DESCRIPTOR = _EMPTY,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Empty)
-  ))
-_sym_db.RegisterMessage(Empty)
-
-GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GENERALMSGOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput)
-  ))
-_sym_db.RegisterMessage(GeneralMsgOutput)
-
-GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GENERALHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(GeneralHeartbeatOutput)
-
-PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict(
-  DESCRIPTOR = _PINGOUTOUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PingOutout)
-  ))
-_sym_db.RegisterMessage(PingOutout)
-
-TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict(
-  DESCRIPTOR = _TESTINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.TestInput)
-  ))
-_sym_db.RegisterMessage(TestInput)
-
-TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict(
-
-  MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict(
-    DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry)
-    ))
-  ,
-  DESCRIPTOR = _TESTOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.TestOutput)
-  ))
-_sym_db.RegisterMessage(TestOutput)
-_sym_db.RegisterMessage(TestOutput.MapValueEntry)
-
-Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
-
-  PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict(
-    DESCRIPTOR = _PERSON_PHONENUMBER,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber)
-    ))
-  ,
-  DESCRIPTOR = _PERSON,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Person)
-  ))
-_sym_db.RegisterMessage(Person)
-_sym_db.RegisterMessage(Person.PhoneNumber)
-
-Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
-
-  File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict(
-    DESCRIPTOR = _PROFILE_FILE,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.Profile.File)
-    ))
-  ,
-  DESCRIPTOR = _PROFILE,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Profile)
-  ))
-_sym_db.RegisterMessage(Profile)
-_sym_db.RegisterMessage(Profile.File)
-
-GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict(
-  DESCRIPTOR = _GETUSERSBYSTATUSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput)
-  ))
-_sym_db.RegisterMessage(GetUsersByStatusInput)
-
-GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput)
-  ))
-_sym_db.RegisterMessage(GetUsersByStatusOutput)
-
-AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(AccountHeartbeatOutput)
-
-LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict(
-  DESCRIPTOR = _LOGININPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.LoginInput)
-  ))
-_sym_db.RegisterMessage(LoginInput)
-
-UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict(
-  DESCRIPTOR = _USEROUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.UserOutput)
-  ))
-_sym_db.RegisterMessage(UserOutput)
-
-SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict(
-  DESCRIPTOR = _SINGUPINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.SingupInput)
-  ))
-_sym_db.RegisterMessage(SingupInput)
-
-SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict(
-  DESCRIPTOR = _SINGUPOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.SingupOutput)
-  ))
-_sym_db.RegisterMessage(SingupOutput)
-
-DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict(
-  DESCRIPTOR = _DELETEUSERINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DeleteUserInput)
-  ))
-_sym_db.RegisterMessage(DeleteUserInput)
-
-UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict(
-  DESCRIPTOR = _UPDATEUSERSTATUSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput)
-  ))
-_sym_db.RegisterMessage(UpdateUserStatusInput)
-
-ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict(
-  DESCRIPTOR = _RESENDCONFIRMCODEINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput)
-  ))
-_sym_db.RegisterMessage(ResendConfirmCodeInput)
-
-ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict(
-  DESCRIPTOR = _CONFIRMINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.ConfirmInput)
-  ))
-_sym_db.RegisterMessage(ConfirmInput)
-
-DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _DPHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(DPHeartbeatOutput)
-
-DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict(
-  DESCRIPTOR = _DPGETPHYSICALDISKSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput)
-  ))
-_sym_db.RegisterMessage(DPGetPhysicalDisksInput)
-
-DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict(
-  DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput)
-  ))
-_sym_db.RegisterMessage(DPGetDisksPredictionInput)
-
-DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict(
-  DESCRIPTOR = _DPBINARYOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput)
-  ))
-_sym_db.RegisterMessage(DPBinaryOutput)
-
-CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(CollectionHeartbeatOutput)
-
-PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict(
-  DESCRIPTOR = _POSTMETRICSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PostMetricsInput)
-  ))
-_sym_db.RegisterMessage(PostMetricsInput)
-
-PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict(
-  DESCRIPTOR = _POSTDBRELAYINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput)
-  ))
-_sym_db.RegisterMessage(PostDBRelayInput)
-
-CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict(
-  DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput)
-  ))
-_sym_db.RegisterMessage(CollectionMessageOutput)
-
-
-_TESTOUTPUT_MAPVALUEENTRY.has_options = True
-_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-
-_GENERAL = _descriptor.ServiceDescriptor(
-  name='General',
-  full_name='proto.General',
-  file=DESCRIPTOR,
-  index=0,
-  options=None,
-  serialized_start=2081,
-  serialized_end=2342,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='GeneralHeartbeat',
-    full_name='proto.General.GeneralHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_GENERALHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Ping',
-    full_name='proto.General.Ping',
-    index=1,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_PINGOUTOUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Test',
-    full_name='proto.General.Test',
-    index=2,
-    containing_service=None,
-    input_type=_TESTINPUT,
-    output_type=_TESTOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_GENERAL)
-
-DESCRIPTOR.services_by_name['General'] = _GENERAL
-
-
-_ACCOUNT = _descriptor.ServiceDescriptor(
-  name='Account',
-  full_name='proto.Account',
-  file=DESCRIPTOR,
-  index=1,
-  options=None,
-  serialized_start=2345,
-  serialized_end=3149,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='AccountHeartbeat',
-    full_name='proto.Account.AccountHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_ACCOUNTHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Login',
-    full_name='proto.Account.Login',
-    index=1,
-    containing_service=None,
-    input_type=_LOGININPUT,
-    output_type=_USEROUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Signup',
-    full_name='proto.Account.Signup',
-    index=2,
-    containing_service=None,
-    input_type=_SINGUPINPUT,
-    output_type=_SINGUPOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='ResendConfirmCode',
-    full_name='proto.Account.ResendConfirmCode',
-    index=3,
-    containing_service=None,
-    input_type=_RESENDCONFIRMCODEINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Confirm',
-    full_name='proto.Account.Confirm',
-    index=4,
-    containing_service=None,
-    input_type=_CONFIRMINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='GetUsersByStatus',
-    full_name='proto.Account.GetUsersByStatus',
-    index=5,
-    containing_service=None,
-    input_type=_GETUSERSBYSTATUSINPUT,
-    output_type=_GETUSERSBYSTATUSOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DeleteUser',
-    full_name='proto.Account.DeleteUser',
-    index=6,
-    containing_service=None,
-    input_type=_DELETEUSERINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='UpdateUserStatus',
-    full_name='proto.Account.UpdateUserStatus',
-    index=7,
-    containing_service=None,
-    input_type=_UPDATEUSERSTATUSINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_ACCOUNT)
-
-DESCRIPTOR.services_by_name['Account'] = _ACCOUNT
-
-
-_DISKPROPHET = _descriptor.ServiceDescriptor(
-  name='Diskprophet',
-  full_name='proto.Diskprophet',
-  file=DESCRIPTOR,
-  index=2,
-  options=None,
-  serialized_start=3152,
-  serialized_end=3487,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='DPHeartbeat',
-    full_name='proto.Diskprophet.DPHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_DPHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DPGetPhysicalDisks',
-    full_name='proto.Diskprophet.DPGetPhysicalDisks',
-    index=1,
-    containing_service=None,
-    input_type=_DPGETPHYSICALDISKSINPUT,
-    output_type=_DPBINARYOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DPGetDisksPrediction',
-    full_name='proto.Diskprophet.DPGetDisksPrediction',
-    index=2,
-    containing_service=None,
-    input_type=_DPGETDISKSPREDICTIONINPUT,
-    output_type=_DPBINARYOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_DISKPROPHET)
-
-DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET
-
-
-_COLLECTION = _descriptor.ServiceDescriptor(
-  name='Collection',
-  full_name='proto.Collection',
-  file=DESCRIPTOR,
-  index=3,
-  options=None,
-  serialized_start=3490,
-  serialized_end=3837,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='CollectionHeartbeat',
-    full_name='proto.Collection.CollectionHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_COLLECTIONHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='PostDBRelay',
-    full_name='proto.Collection.PostDBRelay',
-    index=1,
-    containing_service=None,
-    input_type=_POSTDBRELAYINPUT,
-    output_type=_COLLECTIONMESSAGEOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='PostMetrics',
-    full_name='proto.Collection.PostMetrics',
-    index=2,
-    containing_service=None,
-    input_type=_POSTMETRICSINPUT,
-    output_type=_COLLECTIONMESSAGEOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_COLLECTION)
-
-DESCRIPTOR.services_by_name['Collection'] = _COLLECTION
-
-# @@protoc_insertion_point(module_scope)
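
For reference, the deleted client_pb2.py above is protoc-generated Python: it builds descriptors for the proto.* messages (Person, Profile, UserOutput, the DP* and Collection* request/response types) and for the General, Account, Diskprophet, and Collection services, then registers concrete message classes via _reflection. A minimal usage sketch of that generated API follows; the server address and field values here are illustrative assumptions, not taken from the module itself:

    import grpc

    import client_pb2
    import client_pb2_grpc

    # Build a proto3 message and round-trip it through the wire format; this
    # is what the request_serializer/response_deserializer hooks in the
    # generated stubs do internally.
    person = client_pb2.Person(name='example', id=42, email='user@example.com')
    phone = person.phones.add()              # repeated nested PhoneNumber
    phone.number = '555-0100'
    restored = client_pb2.Person.FromString(person.SerializeToString())
    assert restored.email == person.email

    # Call one of the generated client stubs (defined in client_pb2_grpc.py,
    # deleted next); 'localhost:50051' is a hypothetical endpoint.
    channel = grpc.insecure_channel('localhost:50051')
    stub = client_pb2_grpc.DiskprophetStub(channel)
    reply = stub.DPHeartbeat(client_pb2.Empty())
    print(reply.message)                     # DPHeartbeatOutput.message
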
diff --git a/src/pybind/mgr/diskprediction/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction/common/client_pb2_grpc.py
deleted file mode 100644 (file)
index c1c3217..0000000
+++ /dev/null
@@ -1,395 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
-
-import client_pb2 as mainServer__pb2
-
-
-class GeneralStub(object):
-  """-------------------------- General -------------------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.GeneralHeartbeat = channel.unary_unary(
-        '/proto.General/GeneralHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString,
-        )
-    self.Ping = channel.unary_unary(
-        '/proto.General/Ping',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.PingOutout.FromString,
-        )
-    self.Test = channel.unary_unary(
-        '/proto.General/Test',
-        request_serializer=mainServer__pb2.TestInput.SerializeToString,
-        response_deserializer=mainServer__pb2.TestOutput.FromString,
-        )
-
-
-class GeneralServicer(object):
-  """-------------------------- General -------------------------------------
-  """
-
-  def GeneralHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Ping(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Test(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_GeneralServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.GeneralHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString,
-      ),
-      'Ping': grpc.unary_unary_rpc_method_handler(
-          servicer.Ping,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.PingOutout.SerializeToString,
-      ),
-      'Test': grpc.unary_unary_rpc_method_handler(
-          servicer.Test,
-          request_deserializer=mainServer__pb2.TestInput.FromString,
-          response_serializer=mainServer__pb2.TestOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.General', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class AccountStub(object):
-  """-------------------------- SERVER ACCOUNT ------------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.AccountHeartbeat = channel.unary_unary(
-        '/proto.Account/AccountHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString,
-        )
-    self.Login = channel.unary_unary(
-        '/proto.Account/Login',
-        request_serializer=mainServer__pb2.LoginInput.SerializeToString,
-        response_deserializer=mainServer__pb2.UserOutput.FromString,
-        )
-    self.Signup = channel.unary_unary(
-        '/proto.Account/Signup',
-        request_serializer=mainServer__pb2.SingupInput.SerializeToString,
-        response_deserializer=mainServer__pb2.SingupOutput.FromString,
-        )
-    self.ResendConfirmCode = channel.unary_unary(
-        '/proto.Account/ResendConfirmCode',
-        request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.Confirm = channel.unary_unary(
-        '/proto.Account/Confirm',
-        request_serializer=mainServer__pb2.ConfirmInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.GetUsersByStatus = channel.unary_unary(
-        '/proto.Account/GetUsersByStatus',
-        request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString,
-        )
-    self.DeleteUser = channel.unary_unary(
-        '/proto.Account/DeleteUser',
-        request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.UpdateUserStatus = channel.unary_unary(
-        '/proto.Account/UpdateUserStatus',
-        request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-
-
-class AccountServicer(object):
-  """-------------------------- SERVER ACCOUNT ------------------------------
-  """
-
-  def AccountHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Login(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Signup(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def ResendConfirmCode(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Confirm(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def GetUsersByStatus(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DeleteUser(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def UpdateUserStatus(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_AccountServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'AccountHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.AccountHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString,
-      ),
-      'Login': grpc.unary_unary_rpc_method_handler(
-          servicer.Login,
-          request_deserializer=mainServer__pb2.LoginInput.FromString,
-          response_serializer=mainServer__pb2.UserOutput.SerializeToString,
-      ),
-      'Signup': grpc.unary_unary_rpc_method_handler(
-          servicer.Signup,
-          request_deserializer=mainServer__pb2.SingupInput.FromString,
-          response_serializer=mainServer__pb2.SingupOutput.SerializeToString,
-      ),
-      'ResendConfirmCode': grpc.unary_unary_rpc_method_handler(
-          servicer.ResendConfirmCode,
-          request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'Confirm': grpc.unary_unary_rpc_method_handler(
-          servicer.Confirm,
-          request_deserializer=mainServer__pb2.ConfirmInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'GetUsersByStatus': grpc.unary_unary_rpc_method_handler(
-          servicer.GetUsersByStatus,
-          request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString,
-          response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString,
-      ),
-      'DeleteUser': grpc.unary_unary_rpc_method_handler(
-          servicer.DeleteUser,
-          request_deserializer=mainServer__pb2.DeleteUserInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'UpdateUserStatus': grpc.unary_unary_rpc_method_handler(
-          servicer.UpdateUserStatus,
-          request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Account', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class DiskprophetStub(object):
-  """------------------------ SERVER DISKPROPHET ---------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.DPHeartbeat = channel.unary_unary(
-        '/proto.Diskprophet/DPHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString,
-        )
-    self.DPGetPhysicalDisks = channel.unary_unary(
-        '/proto.Diskprophet/DPGetPhysicalDisks',
-        request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString,
-        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
-        )
-    self.DPGetDisksPrediction = channel.unary_unary(
-        '/proto.Diskprophet/DPGetDisksPrediction',
-        request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString,
-        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
-        )
-
-
-class DiskprophetServicer(object):
-  """------------------------ SERVER DISKPROPHET ---------------------------
-  """
-
-  def DPHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DPGetPhysicalDisks(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DPGetDisksPrediction(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_DiskprophetServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'DPHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.DPHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString,
-      ),
-      'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler(
-          servicer.DPGetPhysicalDisks,
-          request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString,
-          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
-      ),
-      'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler(
-          servicer.DPGetDisksPrediction,
-          request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString,
-          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Diskprophet', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class CollectionStub(object):
-  """------------------------ SERVER Collection ---------------------------
-
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.CollectionHeartbeat = channel.unary_unary(
-        '/proto.Collection/CollectionHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString,
-        )
-    self.PostDBRelay = channel.unary_unary(
-        '/proto.Collection/PostDBRelay',
-        request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
-        )
-    self.PostMetrics = channel.unary_unary(
-        '/proto.Collection/PostMetrics',
-        request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
-        )
-
-
-class CollectionServicer(object):
-  """------------------------ SERVER Collection ---------------------------
-
-  """
-
-  def CollectionHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def PostDBRelay(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def PostMetrics(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_CollectionServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.CollectionHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString,
-      ),
-      'PostDBRelay': grpc.unary_unary_rpc_method_handler(
-          servicer.PostDBRelay,
-          request_deserializer=mainServer__pb2.PostDBRelayInput.FromString,
-          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
-      ),
-      'PostMetrics': grpc.unary_unary_rpc_method_handler(
-          servicer.PostMetrics,
-          request_deserializer=mainServer__pb2.PostMetricsInput.FromString,
-          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Collection', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
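
The three service classes above follow the standard grpcio codegen pattern: a *Stub wraps a grpc.Channel with one (serializer, deserializer) pair per RPC, a *Servicer supplies overridable method bodies, and each add_*Servicer_to_server() registers the handlers under the proto package name. A minimal caller sketch, assuming the package is importable as diskprediction.common and using the api.diskprophet.com:31400 defaults and the "is alive" heartbeat convention that grpcclient.py (below) relies on:

    import grpc

    from diskprediction.common import client_pb2, client_pb2_grpc

    def server_is_alive(host='api.diskprophet.com', port=31400, cert=None):
        # secure channel mirroring the options GRPcClient._get_channel()
        # sets below; cert is an optional PEM root certificate
        creds = grpc.ssl_channel_credentials(root_certificates=cert)
        channel = grpc.secure_channel('{}:{}'.format(host, port), creds)
        stub = client_pb2_grpc.AccountStub(channel)
        # AccountHeartbeat takes the Empty message; the reply's message
        # field contains "is alive" when the service is reachable
        reply = stub.AccountHeartbeat(client_pb2.Empty())
        return 'is alive' in str(reply.message)
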
diff --git a/src/pybind/mgr/diskprediction/common/clusterdata.py b/src/pybind/mgr/diskprediction/common/clusterdata.py
deleted file mode 100644 (file)
index 3810f0e..0000000
+++ /dev/null
@@ -1,511 +0,0 @@
-"""\r
-Ceph database API\r
-\r
-"""\r
-from __future__ import absolute_import\r
-\r
-import json\r
-import rbd\r
-import rados\r
-from mgr_module import CommandResult\r
-\r
-\r
-RBD_FEATURES_NAME_MAPPING = {\r
-    rbd.RBD_FEATURE_LAYERING: 'layering',\r
-    rbd.RBD_FEATURE_STRIPINGV2: 'striping',\r
-    rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock',\r
-    rbd.RBD_FEATURE_OBJECT_MAP: 'object-map',\r
-    rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff',\r
-    rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten',\r
-    rbd.RBD_FEATURE_JOURNALING: 'journaling',\r
-    rbd.RBD_FEATURE_DATA_POOL: 'data-pool',\r
-    rbd.RBD_FEATURE_OPERATIONS: 'operations',\r
-}\r
-\r
-\r
-def differentiate(data1, data2):\r
-    """\r
-    # >>> times = [0, 2]\r
-    # >>> values = [100, 101]\r
-    # >>> differentiate(*zip(times, values))\r
-    0.5\r
-    """\r
-    return (data2[1] - data1[1]) / float(data2[0] - data1[0])\r
-\r
-\r
-class ClusterAPI(object):\r
-    def __init__(self, module_obj):\r
-        self.module = module_obj\r
-\r
-    @staticmethod\r
-    def format_bitmask(features):\r
-        """\r
-        Formats the bitmask:\r
-        # >>> format_bitmask(45)\r
-        ['deep-flatten', 'exclusive-lock', 'layering', 'object-map']\r
-        """\r
-        names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items()\r
-                 if key & features == key]\r
-        return sorted(names)\r
-\r
-    def _open_connection(self, pool_name='device_health_metrics'):\r
-        pools = self.module.rados.list_pools()\r
-        is_pool = False\r
-        for pool in pools:\r
-            if pool == pool_name:\r
-                is_pool = True\r
-                break\r
-        if not is_pool:\r
-            self.module.log.debug('create %s pool' % pool_name)\r
-            # create pool\r
-            result = CommandResult('')\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'osd pool create',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'pg_num': 1,\r
-            }), '')\r
-            r, outb, outs = result.wait()\r
-            assert r == 0\r
-\r
-            # set pool application\r
-            result = CommandResult('')\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'osd pool application enable',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'app': 'mgr_devicehealth',\r
-            }), '')\r
-            r, outb, outs = result.wait()\r
-            assert r == 0\r
-\r
-        ioctx = self.module.rados.open_ioctx(pool_name)\r
-        return ioctx\r
-\r
-    @classmethod\r
-    def _rbd_disk_usage(cls, image, snaps, whole_object=True):\r
-        class DUCallback(object):\r
-            def __init__(self):\r
-                self.used_size = 0\r
-\r
-            def __call__(self, offset, length, exists):\r
-                if exists:\r
-                    self.used_size += length\r
-        snap_map = {}\r
-        prev_snap = None\r
-        total_used_size = 0\r
-        for _, size, name in snaps:\r
-            image.set_snap(name)\r
-            du_callb = DUCallback()\r
-            image.diff_iterate(0, size, prev_snap, du_callb,\r
-                               whole_object=whole_object)\r
-            snap_map[name] = du_callb.used_size\r
-            total_used_size += du_callb.used_size\r
-            prev_snap = name\r
-        return total_used_size, snap_map\r
-\r
-    def _rbd_image(self, ioctx, pool_name, image_name):\r
-        with rbd.Image(ioctx, image_name) as img:\r
-            stat = img.stat()\r
-            stat['name'] = image_name\r
-            stat['id'] = img.id()\r
-            stat['pool_name'] = pool_name\r
-            features = img.features()\r
-            stat['features'] = features\r
-            stat['features_name'] = self.format_bitmask(features)\r
-\r
-            # the following keys are deprecated\r
-            del stat['parent_pool']\r
-            del stat['parent_name']\r
-            stat['timestamp'] = '{}Z'.format(img.create_timestamp()\r
-                                             .isoformat())\r
-            stat['stripe_count'] = img.stripe_count()\r
-            stat['stripe_unit'] = img.stripe_unit()\r
-            stat['data_pool'] = None\r
-            try:\r
-                parent_info = img.parent_info()\r
-                stat['parent'] = {\r
-                    'pool_name': parent_info[0],\r
-                    'image_name': parent_info[1],\r
-                    'snap_name': parent_info[2]\r
-                }\r
-            except rbd.ImageNotFound:\r
-                # no parent image\r
-                stat['parent'] = None\r
-            # snapshots\r
-            stat['snapshots'] = []\r
-            for snap in img.list_snaps():\r
-                snap['timestamp'] = '{}Z'.format(\r
-                    img.get_snap_timestamp(snap['id']).isoformat())\r
-                snap['is_protected'] = img.is_protected_snap(snap['name'])\r
-                snap['used_bytes'] = None\r
-                snap['children'] = []\r
-                img.set_snap(snap['name'])\r
-                for child_pool_name, child_image_name in img.list_children():\r
-                    snap['children'].append({\r
-                        'pool_name': child_pool_name,\r
-                        'image_name': child_image_name\r
-                    })\r
-                stat['snapshots'].append(snap)\r
-            # disk usage\r
-            if 'fast-diff' in stat['features_name']:\r
-                snaps = [(s['id'], s['size'], s['name'])\r
-                         for s in stat['snapshots']]\r
-                snaps.sort(key=lambda s: s[0])\r
-                snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)]\r
-                total_prov_bytes, snaps_prov_bytes = self._rbd_disk_usage(\r
-                    img, snaps, True)\r
-                stat['total_disk_usage'] = total_prov_bytes\r
-                for snap, prov_bytes in snaps_prov_bytes.items():\r
-                    if snap is None:\r
-                        stat['disk_usage'] = prov_bytes\r
-                        continue\r
-                    for ss in stat['snapshots']:\r
-                        if ss['name'] == snap:\r
-                            ss['disk_usage'] = prov_bytes\r
-                            break\r
-            else:\r
-                stat['total_disk_usage'] = None\r
-                stat['disk_usage'] = None\r
-            return stat\r
-\r
-    def get_rbd_list(self, pool_name=None):\r
-        if pool_name:\r
-            pools = [pool_name]\r
-        else:\r
-            pools = []\r
-            for data in self.get_osd_pools():\r
-                pools.append(data['pool_name'])\r
-        result = []\r
-        for pool in pools:\r
-            rbd_inst = rbd.RBD()\r
-            with self._open_connection(str(pool)) as ioctx:\r
-                names = rbd_inst.list(ioctx)\r
-                for name in names:\r
-                    try:\r
-                        stat = self._rbd_image(ioctx, pool, name)\r
-                    except rbd.ImageNotFound:\r
-                        continue\r
-                    result.append(stat)\r
-        return result\r
-\r
-    def get_pg_summary(self):\r
-        return self.module.get('pg_summary')\r
-\r
-    def get_df_stats(self):\r
-        return self.module.get('df').get('stats', {})\r
-\r
-    def get_object_pg_info(self, pool_name, object_name):\r
-        result = CommandResult('')\r
-        data_json = {}\r
-        self.module.send_command(\r
-            result, 'mon', '', json.dumps({\r
-                'prefix': 'osd map',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'object': object_name,\r
-            }), '')\r
-        ret, outb, outs = result.wait()\r
-        try:\r
-            if outb:\r
-                data_json = json.loads(outb)\r
-            else:\r
-                self.module.log.error('unable to get %s pg info' % pool_name)\r
-        except Exception as e:\r
-            self.module.log.error(\r
-                'unable to get %s pg, error: %s' % (pool_name, str(e)))\r
-        return data_json\r
-\r
-    def get_rbd_info(self, pool_name, image_name):\r
-        with self._open_connection(pool_name) as ioctx:\r
-            try:\r
-                stat = self._rbd_image(ioctx, pool_name, image_name)\r
-                if stat.get('id'):\r
-                    objects = self.get_pool_objects(pool_name, stat.get('id'))\r
-                    if objects:\r
-                        stat['objects'] = objects\r
-                        stat['pgs'] = list()\r
-                    for obj_name in objects:\r
-                        pgs_data = self.get_object_pg_info(pool_name, obj_name)\r
-                        stat['pgs'].extend([pgs_data])\r
-            except rbd.ImageNotFound:\r
-                stat = {}\r
-        return stat\r
-\r
-    def get_pool_objects(self, pool_name, image_id=None):\r
-        # list_objects\r
-        objects = []\r
-        with self._open_connection(pool_name) as ioctx:\r
-            object_iterator = ioctx.list_objects()\r
-            while True:\r
-                try:\r
-                    rados_object = object_iterator.next()\r
-                    if image_id is None:\r
-                        objects.append(str(rados_object.key))\r
-                    else:\r
-                        v = str(rados_object.key).split('.')\r
-                        if len(v) >= 2 and v[1] == image_id:\r
-                            objects.append(str(rados_object.key))\r
-                except StopIteration:\r
-                    break\r
-        return objects\r
-\r
-    def get_global_total_size(self):\r
-        total_bytes = \\r
-            self.module.get('df').get('stats', {}).get('total_bytes')\r
-        total_size = float(total_bytes) / (1024 * 1024 * 1024)\r
-        return round(total_size)\r
-\r
-    def get_global_avail_size(self):\r
-        total_avail_bytes = \\r
-            self.module.get('df').get('stats', {}).get('total_avail_bytes')\r
-        total_avail_size = float(total_avail_bytes) / (1024 * 1024 * 1024)\r
-        return round(total_avail_size, 2)\r
-\r
-    def get_global_raw_used_size(self):\r
-        total_used_bytes = \\r
-            self.module.get('df').get('stats', {}).get('total_used_bytes')\r
-        total_raw_used_size = float(total_used_bytes) / (1024 * 1024 * 1024)\r
-        return round(total_raw_used_size, 2)\r
-\r
-    def get_global_raw_used_percent(self):\r
-        total_bytes = \\r
-            self.module.get('df').get('stats').get('total_bytes')\r
-        total_used_bytes = \\r
-            self.module.get('df').get('stats').get('total_used_bytes')\r
-        if total_bytes and total_used_bytes:\r
-            total_used_percent = \\r
-                float(total_used_bytes) / float(total_bytes) * 100\r
-        else:\r
-            total_used_percent = 0.0\r
-        return round(total_used_percent, 2)\r
-\r
-    def get_osd_data(self):\r
-        return self.module.get('config').get('osd_data', '')\r
-\r
-    def get_osd_journal(self):\r
-        return self.module.get('config').get('osd_journal', '')\r
-\r
-    def get_osd_metadata(self, osd_id=None):\r
-        if osd_id is not None:\r
-            return self.module.get('osd_metadata')[str(osd_id)]\r
-        return self.module.get('osd_metadata')\r
-\r
-    def get_mgr_metadata(self, mgr_id):\r
-        return self.module.get_metadata('mgr', mgr_id)\r
-\r
-    def get_osd_epoch(self):\r
-        return self.module.get('osd_map').get('epoch', 0)\r
-\r
-    def get_osds(self):\r
-        return self.module.get('osd_map').get('osds', [])\r
-\r
-    def get_max_osd(self):\r
-        return self.module.get('osd_map').get('max_osd', '')\r
-\r
-    def get_osd_pools(self):\r
-        return self.module.get('osd_map').get('pools', [])\r
-\r
-    def get_pool_bytes_used(self, pool_id):\r
-        bytes_used = None\r
-        pools = self.module.get('df').get('pools', [])\r
-        for pool in pools:\r
-            if pool_id == pool['id']:\r
-                bytes_used = pool['stats']['bytes_used']\r
-        return bytes_used\r
-\r
-    def get_cluster_id(self):\r
-        return self.module.get('mon_map').get('fsid')\r
-\r
-    def get_health_status(self):\r
-        health = json.loads(self.module.get('health')['json'])\r
-        return health.get('status')\r
-\r
-    def get_health_checks(self):\r
-        health = json.loads(self.module.get('health')['json'])\r
-        if health.get('checks'):\r
-            message = ''\r
-            checks = health['checks']\r
-            for key in checks.keys():\r
-                if message:\r
-                    message += ";"\r
-                if checks[key].get('summary', {}).get('message', ""):\r
-                    message += checks[key]['summary']['message']\r
-            return message\r
-        else:\r
-            return ''\r
-\r
-    def get_mons(self):\r
-        return self.module.get('mon_map').get('mons', [])\r
-\r
-    def get_mon_status(self):\r
-        mon_status = json.loads(self.module.get('mon_status')['json'])\r
-        return mon_status\r
-\r
-    def get_osd_smart(self, osd_id, device_id=None):\r
-        osd_devices = []\r
-        osd_smart = {}\r
-        devices = self.module.get('devices')\r
-        for dev in devices.get('devices', []):\r
-            osd = ""\r
-            daemons = dev.get('daemons', [])\r
-            for daemon in daemons:\r
-                if daemon[4:] != str(osd_id):\r
-                    continue\r
-                osd = daemon\r
-            if not osd:\r
-                continue\r
-            if dev.get('devid'):\r
-                osd_devices.append(dev.get('devid'))\r
-        for dev_id in osd_devices:\r
-            o_key = ''\r
-            if device_id and dev_id != device_id:\r
-                continue\r
-            smart_data = self.get_device_health(dev_id)\r
-            if smart_data:\r
-                o_key = sorted(smart_data.iterkeys(), reverse=True)[0]\r
-            if o_key and smart_data and smart_data.values():\r
-                dev_smart = smart_data[o_key]\r
-                if dev_smart:\r
-                    osd_smart[dev_id] = dev_smart\r
-        return osd_smart\r
-\r
-    def get_device_health(self, device_id):\r
-        res = {}\r
-        try:\r
-            with self._open_connection() as ioctx:\r
-                with rados.ReadOpCtx() as op:\r
-                    omap_iter, ret = ioctx.get_omap_vals(op, '', '', 500)\r
-                    assert ret == 0\r
-                    try:\r
-                        ioctx.operate_read_op(op, device_id)\r
-                        for key, value in list(omap_iter):\r
-                            v = None\r
-                            try:\r
-                                v = json.loads(value)\r
-                            except ValueError:\r
-                                self.module.log.error(\r
-                                    'unable to parse value for %s: "%s"' % (key, value))\r
-                            res[key] = v\r
-                    except IOError:\r
-                        pass\r
-                    except OSError as e:\r
-                        self.module.log.error(\r
-                            'unable to get device {} health, {}'.format(device_id, str(e)))\r
-        except IOError:\r
-            return {}\r
-        return res\r
-\r
-    def get_osd_hostname(self, osd_id):\r
-        result = ''\r
-        osd_metadata = self.get_osd_metadata(osd_id)\r
-        if osd_metadata:\r
-            osd_host = osd_metadata.get('hostname', 'None')\r
-            result = osd_host\r
-        return result\r
-\r
-    def get_osd_device_id(self, osd_id):\r
-        result = {}\r
-        if not str(osd_id).isdigit():\r
-            if str(osd_id)[0:4] == 'osd.':\r
-                osdid = osd_id[4:]\r
-            else:\r
-                raise Exception('not a valid <osd.NNN> id or number')\r
-        else:\r
-            osdid = osd_id\r
-        osd_metadata = self.get_osd_metadata(osdid)\r
-        if osd_metadata:\r
-            osd_device_ids = osd_metadata.get('device_ids', '')\r
-            if osd_device_ids:\r
-                result = {}\r
-                for osd_device_id in osd_device_ids.split(','):\r
-                    dev_name = ''\r
-                    if len(str(osd_device_id).split('=')) >= 2:\r
-                        dev_name = osd_device_id.split('=')[0]\r
-                        dev_id = osd_device_id.split('=')[1]\r
-                    else:\r
-                        dev_id = osd_device_id\r
-                    if dev_name:\r
-                        result[dev_name] = {'dev_id': dev_id}\r
-        return result\r
-\r
-    def get_file_systems(self):\r
-        return self.module.get('fs_map').get('filesystems', [])\r
-\r
-    def get_pg_stats(self):\r
-        return self.module.get('pg_dump').get('pg_stats', [])\r
-\r
-    def get_all_perf_counters(self):\r
-        return self.module.get_all_perf_counters()\r
-\r
-    def get(self, data_name):\r
-        return self.module.get(data_name)\r
-\r
-    def set_device_life_expectancy(self, device_id, from_date, to_date=None):\r
-        result = CommandResult('')\r
-\r
-        if to_date is None:\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'device set-life-expectancy',\r
-                'devid': device_id,\r
-                'from': from_date\r
-            }), '')\r
-        else:\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'device set-life-expectancy',\r
-                'devid': device_id,\r
-                'from': from_date,\r
-                'to': to_date\r
-            }), '')\r
-        ret, outb, outs = result.wait()\r
-        if ret != 0:\r
-            self.module.log.error(\r
-                'failed to set device life expectancy, %s' % outs)\r
-        return ret\r
-\r
-    def reset_device_life_expectancy(self, device_id):\r
-        result = CommandResult('')\r
-        self.module.send_command(result, 'mon', '', json.dumps({\r
-            'prefix': 'device rm-life-expectancy',\r
-            'devid': device_id\r
-        }), '')\r
-        ret, outb, outs = result.wait()\r
-        if ret != 0:\r
-            self.module.log.error(\r
-                'failed to reset device life expectancy, %s' % outs)\r
-        return ret\r
-\r
-    def get_server(self, hostname):\r
-        return self.module.get_server(hostname)\r
-\r
-    def get_configuration(self, key):\r
-        return self.module.get_configuration(key)\r
-\r
-    def get_rate(self, svc_type, svc_name, path):\r
-        """returns most recent rate"""\r
-        data = self.module.get_counter(svc_type, svc_name, path)[path]\r
-\r
-        if data and len(data) > 1:\r
-            return differentiate(*data[-2:])\r
-        return 0.0\r
-\r
-    def get_latest(self, daemon_type, daemon_name, counter):\r
-        return self.module.get_latest(daemon_type, daemon_name, counter)\r
-\r
-    def get_all_information(self):\r
-        result = dict()\r
-        result['osd_map'] = self.module.get('osd_map')\r
-        result['osd_map_tree'] = self.module.get('osd_map_tree')\r
-        result['osd_map_crush'] = self.module.get('osd_map_crush')\r
-        result['config'] = self.module.get('config')\r
-        result['mon_map'] = self.module.get('mon_map')\r
-        result['fs_map'] = self.module.get('fs_map')\r
-        result['osd_metadata'] = self.module.get('osd_metadata')\r
-        result['pg_summary'] = self.module.get('pg_summary')\r
-        result['pg_dump'] = self.module.get('pg_dump')\r
-        result['io_rate'] = self.module.get('io_rate')\r
-        result['df'] = self.module.get('df')\r
-        result['osd_stats'] = self.module.get('osd_stats')\r
-        result['health'] = self.get_health_status()\r
-        result['mon_status'] = self.get_mon_status()\r
-        return result\r
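
ClusterAPI.get_rate() above is the consumer of the differentiate() helper defined at the top of this file: perf counters arrive as (timestamp, value) samples, and the rate is the slope between the two most recent points. A small worked example with invented sample values:

    # two hypothetical (timestamp, counter-value) samples taken 10s apart
    samples = [(1000, 5000), (1010, 5250)]
    rate = differentiate(*samples[-2:])  # (5250 - 5000) / (1010 - 1000)
    print(rate)                          # 25.0 counter units per second
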
diff --git a/src/pybind/mgr/diskprediction/common/cypher.py b/src/pybind/mgr/diskprediction/common/cypher.py
deleted file mode 100644 (file)
index 7b7b60e..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import absolute_import
-
-import time
-
-
-class NodeInfo(object):
-    """ Neo4j Node information """
-    def __init__(self, label, domain_id, name, meta):
-        self.label = label
-        self.domain_id = domain_id
-        self.name = name
-        self.meta = meta
-
-
-class CypherOP(object):
-    """ Cypher Operation """
-
-    @staticmethod
-    def update(node, key, value, timestamp=None):
-        result = ''
-        if timestamp is None:
-            # compute the default at call time, not once at import time
-            timestamp = int(time.time()*(1000**3))
-        if isinstance(node, NodeInfo):
-            if key != 'time':
-                cy_value = '\'%s\'' % value
-            else:
-                cy_value = value
-            result = \
-                'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % (
-                    node.label, key, node.label, timestamp, node.label, key,
-                    cy_value)
-        return result
-
-    @staticmethod
-    def create_or_merge(node, timestamp=None):
-        result = ''
-        if timestamp is None:
-            timestamp = int(time.time()*(1000**3))
-        if isinstance(node, NodeInfo):
-            meta_list = []
-            if isinstance(node.meta, dict):
-                for key, value in node.meta.items():
-                    meta_list.append(CypherOP.update(node, key, value, timestamp))
-            domain_id = '{domainId:\'%s\'}' % node.domain_id
-            if meta_list:
-                result = 'merge (%s:%s %s) %s %s %s' % (
-                    node.label, node.label,
-                    domain_id,
-                    CypherOP.update(node, 'name', node.name, timestamp),
-                    ' '.join(meta_list),
-                    CypherOP.update(node, 'time', timestamp, timestamp))
-            else:
-                result = 'merge (%s:%s %s) %s %s' % (
-                    node.label, node.label,
-                    domain_id,
-                    CypherOP.update(node, 'name', node.name, timestamp),
-                    CypherOP.update(node, 'time', timestamp, timestamp))
-        return result
-
-    @staticmethod
-    def add_link(snode, dnode, relationship, timestamp=None):
-        result = ''
-        if timestamp is None:
-            timestamp = int(time.time()*(1000**3))
-        if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo):
-            cy_snode = CypherOP.create_or_merge(snode, timestamp)
-            cy_dnode = CypherOP.create_or_merge(dnode, timestamp)
-            target = snode.label + dnode.label
-            link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % (
-                snode.label, target, relationship,
-                dnode.label, target,
-                target, timestamp,
-                target, timestamp)
-            result = '%s %s %s' % (cy_snode, cy_dnode, link)
-        return result
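
The case when ... end guard in every clause CypherOP generates keeps writes safe under replay and out-of-order delivery: a property is only overwritten when the incoming timestamp is newer than the node's stored time. For a hypothetical host node (label and ids invented for illustration), create_or_merge() emits a single statement along these lines:

    node = NodeInfo(label='VMHost', domain_id='host-0', name='ceph-node1', meta={})
    print(CypherOP.create_or_merge(node, timestamp=1541600000000000000))
    # merge (VMHost:VMHost {domainId:'host-0'})
    #   set VMHost.name=case when VMHost.time >= 1541600000000000000
    #     then VMHost.name ELSE 'ceph-node1' end
    #   set VMHost.time=case when VMHost.time >= 1541600000000000000
    #     then VMHost.time ELSE 1541600000000000000 end
    # (emitted as one line; wrapped here for readability)
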
diff --git a/src/pybind/mgr/diskprediction/common/grpcclient.py b/src/pybind/mgr/diskprediction/common/grpcclient.py
deleted file mode 100644 (file)
index e12a8f3..0000000
+++ /dev/null
@@ -1,235 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-import grpc
-import json
-from logging import getLogger
-import os
-import time
-
-from . import DummyResonse
-from . import client_pb2
-from . import client_pb2_grpc
-
-
-def gen_configuration(**kwargs):
-    configuration = {
-        'host': kwargs.get('host', 'api.diskprophet.com'),
-        'user': kwargs.get('user'),
-        'password': kwargs.get('password'),
-        'port': kwargs.get('port', 31400),
-        'mgr_inst': kwargs.get('mgr_inst', None),
-        'cert_context': kwargs.get('cert_context'),
-        'ssl_target_name': kwargs.get('ssl_target_name', 'api.diskprophet.com'),
-        'default_authority': kwargs.get('default_authority', 'api.diskprophet.com')}
-    return configuration
-
-
-class GRPcClient:
-
-    def __init__(self, configuration):
-        self.auth = None
-        self.channel = None
-        self.host = configuration.get('host')
-        self.port = configuration.get('port')
-        if configuration.get('user') and configuration.get('password'):
-            self.auth = (
-                ('account', configuration.get('user')),
-                ('password', configuration.get('password')))
-        self.cert_context = configuration.get('cert_context')
-        self.ssl_target_name = configuration.get('ssl_target_name')
-        self.default_authority = configuration.get('default_authority')
-        self.mgr_inst = configuration.get('mgr_inst')
-        if self.mgr_inst:
-            self._logger = self.mgr_inst.log
-        else:
-            self._logger = getLogger()
-        self._get_channel()
-
-    def __nonzero__(self):
-        if self.channel:
-            return True
-        else:
-            return False
-
-    def _get_channel(self):
-        try:
-            creds = grpc.ssl_channel_credentials(
-                root_certificates=self.cert_context)
-            self.channel = \
-                grpc.secure_channel('{}:{}'.format(
-                    self.host, self.port), creds,
-                    options=(('grpc.ssl_target_name_override', self.ssl_target_name,),
-                             ('grpc.default_authority', self.default_authority),))
-        except Exception as e:
-            self._logger.error(
-                'failed to create connection exception: {}'.format(
-                    ';'.join(str(e).split('\n\t'))))
-
-    def test_connection(self):
-        try:
-            stub_account = client_pb2_grpc.AccountStub(self.channel)
-            result = stub_account.AccountHeartbeat(client_pb2.Empty())
-            if result and "is alive" in str(result.message):
-                return True
-            else:
-                return False
-        except Exception as e:
-            self._logger.error(
-                'failed to test connection exception: {}'.format(
-                    ';'.join(str(e).split('\n\t'))))
-            return False
-
-    def _send_metrics(self, data, measurement):
-        status_info = dict()
-        status_info['measurement'] = None
-        status_info['success_count'] = 0
-        status_info['failure_count'] = 0
-        for dp_data in data:
-            d_measurement = dp_data.measurement
-            if not d_measurement:
-                status_info['measurement'] = measurement
-            else:
-                status_info['measurement'] = d_measurement
-            tag_list = []
-            field_list = []
-            for name in dp_data.tags:
-                tag = '{}={}'.format(name, dp_data.tags[name])
-                tag_list.append(tag)
-            for name in dp_data.fields:
-                if dp_data.fields[name] is None:
-                    continue
-                if isinstance(dp_data.fields[name], str):
-                    field = '{}=\"{}\"'.format(name, dp_data.fields[name])
-                elif isinstance(dp_data.fields[name], bool):
-                    field = '{}={}'.format(name,
-                                           str(dp_data.fields[name]).lower())
-                elif (isinstance(dp_data.fields[name], int) or
-                      isinstance(dp_data.fields[name], long)):
-                    field = '{}={}i'.format(name, dp_data.fields[name])
-                else:
-                    field = '{}={}'.format(name, dp_data.fields[name])
-                field_list.append(field)
-            point = '{},{} {} {}'.format(
-                status_info['measurement'],
-                ','.join(tag_list),
-                ','.join(field_list),
-                int(time.time() * 1000 * 1000 * 1000))
-            try:
-                resp = self._send_info(data=[point], measurement=status_info['measurement'])
-                status_code = resp.status_code
-                if 200 <= status_code < 300:
-                    self._logger.debug(
-                        '{} send diskprediction api success(ret: {})'.format(
-                            status_info['measurement'], status_code))
-                    status_info['success_count'] += 1
-                else:
-                    self._logger.error(
-                        'return code: {}, content: {}'.format(
-                            status_code, resp.content))
-                    status_info['failure_count'] += 1
-            except Exception as e:
-                status_info['failure_count'] += 1
-                self._logger.error(str(e))
-        return status_info
-
-    def _send_db_relay(self, data, measurement):
-        status_info = dict()
-        status_info['measurement'] = measurement
-        status_info['success_count'] = 0
-        status_info['failure_count'] = 0
-        for dp_data in data:
-            try:
-                resp = self._send_info(
-                    data=[dp_data.fields['cmd']], measurement=measurement)
-                status_code = resp.status_code
-                if 200 <= status_code < 300:
-                    self._logger.debug(
-                        '{} send diskprediction api success(ret: {})'.format(
-                            measurement, status_code))
-                    status_info['success_count'] += 1
-                else:
-                    self._logger.error(
-                        'return code: {}, content: {}'.format(
-                            status_code, resp.content))
-                    status_info['failure_count'] += 1
-            except Exception as e:
-                status_info['failure_count'] += 1
-                self._logger.error(str(e))
-        return status_info
-
-    def send_info(self, data, measurement):
-        """
-        :param data: data structure
-        :param measurement: data measurement class name
-        :return:
-            status_info = {
-                'success_count': <count>,
-                'failure_count': <count>
-            }
-        """
-        if measurement == 'db_relay':
-            return self._send_db_relay(data, measurement)
-        else:
-            return self._send_metrics(data, measurement)
-
-    def _send_info(self, data, measurement):
-        resp = DummyResonse()
-        try:
-            stub_collection = client_pb2_grpc.CollectionStub(self.channel)
-            if measurement == 'db_relay':
-                result = stub_collection.PostDBRelay(
-                    client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth)
-            else:
-                result = stub_collection.PostMetrics(
-                    client_pb2.PostMetricsInput(points=data), metadata=self.auth)
-            if result and 'success' in str(result.message).lower():
-                resp.status_code = 200
-                resp.content = ''
-            else:
-                resp.status_code = 400
-                resp.content = ';'.join(str(result).split('\n\t'))
-                self._logger.error(
-                    'failed to send info: {}'.format(resp.content))
-        except Exception as e:
-            resp.status_code = 400
-            resp.content = ';'.join(str(e).split('\n\t'))
-            self._logger.error(
-                'failed to send info exception: {}'.format(resp.content))
-        return resp
-
-    def query_info(self, host_domain_id, disk_domain_id, measurement):
-        resp = DummyResonse()
-        try:
-            stub_dp = client_pb2_grpc.DiskprophetStub(self.channel)
-            predicted = stub_dp.DPGetDisksPrediction(
-                client_pb2.DPGetDisksPredictionInput(
-                    physicalDiskIds=disk_domain_id),
-                metadata=self.auth)
-            if predicted and hasattr(predicted, 'data'):
-                resp.status_code = 200
-                resp.content = ''
-                resp_json = json.loads(predicted.data)
-                rc = resp_json.get('results', [])
-                if rc:
-                    series = rc[0].get('series', [])
-                    if series:
-                        values = series[0].get('values', [])
-                        if not values:
-                            resp.resp_json = {}
-                        else:
-                            columns = series[0].get('columns', [])
-                            for item in values:
-                                # get prediction key and value from server.
-                                for name, value in zip(columns, item):
-                                    # process prediction data
-                                    resp.resp_json[name] = value
-                return resp
-            else:
-                resp.status_code = 400
-                resp.content = ''
-                resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))}
-                return resp
-        except Exception as e:
-            resp.status_code = 400
-            resp.content = ';'.join(str(e).split('\n\t'))
-            resp.resp_json = {'error': resp.content}
-            return resp
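
The point strings _send_metrics() assembles above are InfluxDB-style line protocol, measurement,tag=... field=... <ns-timestamp>, with string fields quoted, booleans lowercased, and integers suffixed with i. A standalone sketch of that encoding; the measurement and field names here are illustrative, not taken from the agents:

    def to_line_protocol(measurement, tags, fields, ts_ns):
        # mirrors the isinstance() branches in _send_metrics(); bool must
        # be tested before int because bool is an int subclass in Python
        def encode(value):
            if isinstance(value, str):
                return '"{}"'.format(value)
            if isinstance(value, bool):
                return str(value).lower()
            if isinstance(value, int):
                return '{}i'.format(value)
            return str(value)
        tag_part = ','.join('{}={}'.format(k, v) for k, v in sorted(tags.items()))
        field_part = ','.join(
            '{}={}'.format(k, encode(v)) for k, v in sorted(fields.items()))
        return '{},{} {} {}'.format(measurement, tag_part, field_part, ts_ns)

    print(to_line_protocol('sai_disk', {'disk_domain_id': 'abc123'},
                           {'disk_name': 'sda', 'disk_status': 1},
                           1541600000000000000))
    # sai_disk,disk_domain_id=abc123 disk_name="sda",disk_status=1i 1541600000000000000
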
diff --git a/src/pybind/mgr/diskprediction/common/localpredictor.py b/src/pybind/mgr/diskprediction/common/localpredictor.py
deleted file mode 100644 (file)
index 2a79aee..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-from __future__ import absolute_import
-from logging import getLogger
-import time
-
-from . import DummyResonse
-from .clusterdata import ClusterAPI
-from ..predictor.disk_failure_predictor import DiskFailurePredictor, get_diskfailurepredictor_path
-
-
-def gen_configuration(**kwargs):
-    configuration = {
-        'mgr_inst': kwargs.get('mgr_inst', None)}
-    return configuration
-
-
-class LocalPredictor:
-
-    def __init__(self, configuration):
-        self.mgr_inst = configuration.get('mgr_inst')
-        if self.mgr_inst:
-            self._logger = self.mgr_inst.log
-        else:
-            self._logger = getLogger()
-
-    def __nonzero__(self):
-        if self.mgr_inst:
-            return True
-        else:
-            return False
-
-    def test_connection(self):
-        resp = DummyResonse()
-        resp.status_code = 200
-        resp.content = ''
-        return resp
-
-    def send_info(self, data, measurement):
-        status_info = dict()
-        status_info['measurement'] = measurement
-        status_info['success_count'] = 0
-        status_info['failure_count'] = 0
-        for dp_data in data:
-            try:
-                resp = self._send_info(data=dp_data, measurement=measurement)
-                status_code = resp.status_code
-                if 200 <= status_code < 300:
-                    self._logger.debug(
-                        '%s send diskprediction api success(ret: %s)'
-                        % (measurement, status_code))
-                    status_info['success_count'] += 1
-                else:
-                    self._logger.error(
-                        'return code: %s, content: %s, data: %s' % (
-                            status_code, resp.content, data))
-                    status_info['failure_count'] += 1
-            except Exception as e:
-                status_info['failure_count'] += 1
-                self._logger.error(str(e))
-        return status_info
-
-    def _send_info(self, data, measurement):
-        resp = DummyResonse()
-        resp.status_code = 200
-        resp.content = ''
-        return resp
-
-    def _local_predict(self, smart_datas):
-        obj_predictor = DiskFailurePredictor()
-        predictor_path = get_diskfailurepredictor_path()
-        models_path = "{}/models".format(predictor_path)
-        obj_predictor.initialize(models_path)
-        return obj_predictor.predict(smart_datas)
-
-    def query_info(self, host_domain_id, disk_domain_id, measurement):
-        predict_datas = list()
-        obj_api = ClusterAPI(self.mgr_inst)
-        predicted_result = 'Unknown'
-        smart_datas = obj_api.get_device_health(disk_domain_id)
-        if len(smart_datas) >= 6:
-            o_keys = sorted(smart_datas.iterkeys(), reverse=True)
-            for o_key in o_keys:
-                dev_smart = {}
-                s_val = smart_datas[o_key]
-                ata_smart = s_val.get('ata_smart_attributes', {})
-                for attr in ata_smart.get('table', []):
-                    if attr.get('raw', {}).get('string'):
-                        if str(attr.get('raw', {}).get('string', '0')).isdigit():
-                            dev_smart['smart_%s_raw' % attr.get('id')] = \
-                                int(attr.get('raw', {}).get('string', '0'))
-                        else:
-                            if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():
-                                dev_smart['smart_%s_raw' % attr.get('id')] = \
-                                    int(attr.get('raw', {}).get('string',
-                                                                '0').split(' ')[0])
-                            else:
-                                dev_smart['smart_%s_raw' % attr.get('id')] = \
-                                    attr.get('raw', {}).get('value', 0)
-                if s_val.get('power_on_time', {}).get('hours') is not None:
-                    dev_smart['smart_9_raw'] = int(s_val['power_on_time']['hours'])
-                if dev_smart:
-                    predict_datas.append(dev_smart)
-
-            if predict_datas:
-                predicted_result = self._local_predict(predict_datas)
-            resp = DummyResonse()
-            resp.status_code = 200
-            resp.resp_json = {
-                "disk_domain_id": disk_domain_id,
-                "near_failure": predicted_result,
-                "predicted": int(time.time() * (1000 ** 3))}
-            return resp
-        else:
-            resp = DummyResonse()
-            resp.status_code = 400
-            resp.content = '\'predict\' requires at least 6 pieces of disk SMART data'
-            resp.resp_json = \
-                {'error': '\'predict\' requires at least 6 pieces of disk SMART data'}
-        return resp
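
The feature dicts query_info() feeds into DiskFailurePredictor.predict() are keyed smart_<attribute-id>_raw, one dict per daily SMART scrape, newest first, and at least six are required. A hypothetical single sample (the attribute ids are standard SMART ids; the values are invented):

    sample = {
        'smart_5_raw': 0,       # reallocated sector count
        'smart_9_raw': 13000,   # power-on hours, also filled from power_on_time
        'smart_187_raw': 0,     # reported uncorrectable errors
        'smart_197_raw': 0,     # current pending sector count
    }
    # query_info() collects at least six such dicts from get_device_health()
    # before calling _local_predict(); fewer produce the 400 response above
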
diff --git a/src/pybind/mgr/diskprediction/module.py b/src/pybind/mgr/diskprediction/module.py
deleted file mode 100644 (file)
index 1edac76..0000000
+++ /dev/null
@@ -1,376 +0,0 @@
-"""
-A diskprediction module
-"""
-from __future__ import absolute_import
-
-from datetime import datetime
-import errno
-import json
-from mgr_module import MgrModule
-import os
-from threading import Event
-
-from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED
-from .task import MetricsRunner, PredictionRunner, SmartRunner
-
-
-DP_AGENTS = [MetricsRunner, SmartRunner, PredictionRunner]
-
-
-class Module(MgrModule):
-
-    OPTIONS = [
-        {
-            'name': 'diskprediction_server',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_port',
-            'default': '31400'
-        },
-        {
-            'name': 'diskprediction_user',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_password',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_upload_metrics_interval',
-            'default': '600'
-        },
-        {
-            'name': 'diskprediction_upload_smart_interval',
-            'default': '43200'
-        },
-        {
-            'name': 'diskprediction_retrieve_prediction_interval',
-            'default': '43200'
-        },
-        {
-            'name': 'diskprediction_cert_context',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_ssl_target_name_override',
-            'default': 'api.diskprophet.com'
-        },
-        {
-            'name': 'diskprediction_default_authority',
-            'default': 'api.diskprophet.com'
-        }
-    ]
-
-    COMMANDS = [
-        {
-            'cmd': 'device show-prediction-config',
-            'desc': 'Prints diskprediction configuration',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device set-cloud-prediction-config '
-                   'name=server,type=CephString,req=true '
-                   'name=user,type=CephString,req=true '
-                   'name=password,type=CephString,req=true '
-                   'name=certfile,type=CephString,req=true '
-                   'name=port,type=CephString,req=false ',
-            'desc': 'Configure Disk Prediction service',
-            'perm': 'rw'
-        },
-        {
-            'cmd': 'device get-predicted-status '
-                   'name=dev_id,type=CephString,req=true',
-            'desc': 'Get physical device predicted result',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device debug metrics-forced',
-            'desc': 'Run metrics agent forced',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device debug prediction-forced',
-            'desc': 'Run prediction agent forced',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device debug smart-forced',
-            'desc': 'Run smart agent forced',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device predict-life-expectancy '
-                   'name=dev_id,type=CephString,req=true',
-            'desc': 'Predict life expectancy with local predictor',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'diskprediction self-test',
-            'desc': 'Prints hello world to mgr.x.log',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'diskprediction status',
-            'desc': 'Check diskprediction status',
-            'perm': 'r'
-        }
-    ]
-
-    def __init__(self, *args, **kwargs):
-        super(Module, self).__init__(*args, **kwargs)
-        self.status = {'status': DP_MGR_STAT_DISABLED}
-        self.shutdown_event = Event()
-        self._agents = []
-        self._activated_cloud = False
-        self._activated_local = False
-        self._prediction_result = {}
-        self.config = dict()
-
-    @property
-    def config_keys(self):
-        return dict((o['name'], o.get('default', None)) for o in self.OPTIONS)
-
-    def set_config_option(self, option, value):
-        if option not in self.config_keys.keys():
-            raise RuntimeError('{0} is an unknown configuration '
-                               'option'.format(option))
-
-        if option in ['diskprediction_port',
-                      'diskprediction_upload_metrics_interval',
-                      'diskprediction_upload_smart_interval',
-                      'diskprediction_retrieve_prediction_interval']:
-            if not str(value).isdigit():
-                raise RuntimeError('invalid {} configured; please specify '
-                                   'a valid integer value: {}'.format(option, value))
-
-        self.log.debug('Setting in-memory config option %s to: %s', option,
-                       value)
-        self.set_config(option, value)
-        self.config[option] = value
-
-        return True
-
-    def get_configuration(self, key):
-        return self.get_config(key, self.config_keys[key])
-
-    def _show_prediction_config(self, inbuf, cmd):
-        self.show_module_config()
-        return 0, json.dumps(self.config, indent=4), ''
-
-    def _self_test(self, inbuf, cmd):
-        from .test.test_agents import test_agents
-        test_agents(self)
-        return 0, 'self-test completed', ''
-
-    def _set_ssl_target_name(self, inbuf, cmd):
-        str_ssl_target = cmd.get('ssl_target_name', '')
-        try:
-            self.set_config('diskprediction_ssl_target_name_override', str_ssl_target)
-            return (0,
-                    'successfully configured ssl target name', '')
-        except Exception as e:
-            return -errno.EINVAL, '', str(e)
-
-    def _set_ssl_default_authority(self, inbuf, cmd):
-        str_ssl_authority = cmd.get('ssl_authority', '')
-        try:
-            self.set_config('diskprediction_default_authority', str_ssl_authority)
-            return (0,
-                    'successfully configured ssl default authority', '')
-        except Exception as e:
-            return -errno.EINVAL, '', str(e)
-
-    def _get_predicted_status(self, inbuf, cmd):
-        physical_data = dict()
-        try:
-            if not self._prediction_result:
-                for _agent in self._agents:
-                    if isinstance(_agent, PredictionRunner):
-                        _agent.event.set()
-                        break
-            pre_data = self._prediction_result.get(cmd['dev_id'])
-            if pre_data:
-                p_data = pre_data.get('prediction', {})
-                if not p_data.get('predicted'):
-                    predicted = ''
-                else:
-                    predicted = datetime.fromtimestamp(int(
-                        p_data.get('predicted')) / (1000 ** 3))
-                d_data = {
-                    'near_failure': p_data.get('near_failure'),
-                    'predicted': str(predicted),
-                    'serial_number': pre_data.get('serial_number'),
-                    'disk_wwn': pre_data.get('disk_wwn'),
-                    'attachment': p_data.get('disk_name', '')
-                }
-                physical_data[cmd['dev_id']] = d_data
-                msg = json.dumps(d_data, indent=4)
-            else:
-                msg = 'device %s predicted data not ready' % cmd['dev_id']
-        except Exception as e:
-            if str(e).find('No such file') >= 0:
-                msg = 'unable to get device {} predicted data'.format(
-                    cmd['dev_id'])
-            else:
-                msg = 'unable to get osd {} predicted data, {}'.format(
-                    cmd['dev_id'], str(e))
-            self.log.error(msg)
-            return -errno.EINVAL, '', msg
-        return 0, msg, ''
-
-    def _set_cloud_prediction_config(self, inbuf, cmd):
-        trusted_certs = ''
-        str_cert_path = cmd.get('certfile', '')
-        if os.path.exists(str_cert_path):
-            with open(str_cert_path, 'rb') as f:
-                trusted_certs = f.read()
-            self.set_config_option(
-                'diskprediction_cert_context', trusted_certs)
-            for _agent in self._agents:
-                _agent.event.set()
-            self.set_config('diskprediction_server', cmd['server'])
-            self.set_config('diskprediction_user', cmd['user'])
-            self.set_config('diskprediction_password', cmd['password'])
-            if cmd.get('port'):
-                self.set_config('diskprediction_port', cmd['port'])
-            return 0, 'successfully configured cloud mode connection', ''
-        else:
-            return -errno.EINVAL, '', 'certificate file does not exist'
-
-    def _debug_prediction_forced(self, inbuf, cmd):
-        msg = ''
-        for _agent in self._agents:
-            if isinstance(_agent, PredictionRunner):
-                msg = 'run prediction agent successfully'
-                _agent.event.set()
-        return 0, msg, ''
-
-    def _debug_metrics_forced(self, inbuf, cmd):
-        msg = ''
-        for _agent in self._agents:
-            if isinstance(_agent, MetricsRunner):
-                msg = 'run metrics agent successfully'
-                _agent.event.set()
-        return 0, msg, ''
-
-    def _debug_smart_forced(self, inbuf, cmd):
-        msg = ''
-        for _agent in self._agents:
-            if isinstance(_agent, SmartRunner):
-                msg = 'run smart agent successfully'
-                _agent.event.set()
-        return 0, msg, ''
-
-    def _status(self, inbuf, cmd):
-        return 0, json.dumps(self.status), ''
-
-    def _predict_life_expectancy(self, inbuf, cmd):
-        assert cmd['dev_id']
-        from .common.localpredictor import LocalPredictor, gen_configuration
-        conf = gen_configuration(mgr_inst=self)
-        obj_predictor = LocalPredictor(conf)
-        result = obj_predictor.query_info('', cmd['dev_id'], '')
-        if result.status_code == 200:
-            near_failure = result.json()['near_failure']
-            if near_failure.lower() == 'good':
-                return 0, '>6w', ''
-            elif near_failure.lower() == 'warning':
-                return 0, '>=2w and <=6w', ''
-            elif near_failure.lower() == 'bad':
-                return 0, '<2w', ''
-            else:
-                return 0, 'unknown', ''
-        else:
-            return -errno.ENAVAIL, '', result.content
-
-    def handle_command(self, inbuf, cmd):
-        for o_cmd in self.COMMANDS:
-            if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]:
-                fun_name = ''
-                args = o_cmd['cmd'].split(' ')
-                for arg in args:
-                    if arg.lower() == 'diskprediction':
-                        continue
-                    if arg.lower() == 'device':
-                        continue
-                    if '=' in arg or ',' in arg or not arg:
-                        continue
-                    fun_name += '_%s' % arg.replace('-', '_')
-                if fun_name:
-                    fun = getattr(self, fun_name, None)
-                    if fun:
-                        return fun(inbuf, cmd)
-        return -errno.EINVAL, '', 'cmd not found'
-
-    def show_module_config(self):
-        self.fsid = self.get('mon_map')['fsid']
-        self.log.debug('Found Ceph fsid %s', self.fsid)
-
-        for key, default in self.config_keys.items():
-            self.set_config_option(key, self.get_config(key, default))
-
-    def serve(self):
-        self.log.info('Starting diskprediction module')
-        self.status = {'status': DP_MGR_STAT_ENABLED}
-
-        while True:
-            mode = self.get_option('device_failure_prediction_mode')
-            if mode == 'cloud':
-                if not self._activated_cloud:
-                    self.start_cloud_disk_prediction()
-            else:
-                if self._activated_cloud:
-                    self.stop_disk_prediction()
-            if mode == 'local':
-                if not self._activated_local:
-                    self.start_local_disk_prediction()
-            else:
-                if self._activated_local:
-                    self.stop_disk_prediction()
-
-            self.shutdown_event.wait(5)
-            if self.shutdown_event.is_set():
-                break
-        self.stop_disk_prediction()
-
-    def start_cloud_disk_prediction(self):
-        assert not self._activated_cloud
-        for dp_agent in DP_AGENTS:
-            obj_agent = dp_agent(self)
-            if obj_agent:
-                obj_agent.start()
-            else:
-                raise Exception('failed to start task %s' % obj_agent.task_name)
-            self._agents.append(obj_agent)
-        self._activated_cloud = True
-        self.log.info('start cloud disk prediction')
-
-    def start_local_disk_prediction(self):
-        assert not self._activated_local
-        for dp_agent in [PredictionRunner]:
-            obj_agent = dp_agent(self)
-            if obj_agent:
-                obj_agent.start()
-            else:
-                raise Exception('failed to start task %s' % obj_agent.task_name)
-            self._agents.append(obj_agent)
-        self._activated_local = True
-        self.log.info('start local model disk prediction')
-
-    def stop_disk_prediction(self):
-        assert self._activated_local or self._activated_cloud
-        self.status = {'status': DP_MGR_STAT_DISABLED}
-        while self._agents:
-            dp_agent = self._agents.pop()
-            dp_agent.terminate()
-            dp_agent.join(5)
-            del dp_agent
-        self._activated_local = False
-        self._activated_cloud = False
-        self.log.info('stop disk prediction')
-
-    def shutdown(self):
-        self.shutdown_event.set()
-        super(Module, self).shutdown()
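
The handler lookup in handle_command above is convention-driven: it strips the "diskprediction"/"device" prefixes and any "name=type" parameter tokens from the registered command string, joins the remaining words with underscores, and calls the matching underscore-prefixed method. A minimal sketch of that naming rule; the command table entries below are illustrative, not the module's full COMMANDS list:

    # Sketch of the prefix-to-handler naming rule used by handle_command.
    COMMANDS = [
        {'cmd': 'device get-predicted-status name=dev_id,type=CephString'},
        {'cmd': 'diskprediction status'},
    ]

    def handler_name(cmd_string):
        parts = []
        for token in cmd_string.split(' '):
            if token.lower() in ('diskprediction', 'device'):
                continue
            if '=' in token or ',' in token or not token:
                continue
            parts.append(token.replace('-', '_'))
        return '_' + '_'.join(parts)

    assert handler_name(COMMANDS[0]['cmd']) == '_get_predicted_status'
    assert handler_name(COMMANDS[1]['cmd']) == '_status'
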
diff --git a/src/pybind/mgr/diskprediction/predictor/__init__.py b/src/pybind/mgr/diskprediction/predictor/__init__.py
deleted file mode 100644 (file)
index d056be7..0000000
+++ /dev/null
@@ -1 +0,0 @@
-'''Init file for the predictor package'''
diff --git a/src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py b/src/pybind/mgr/diskprediction/predictor/disk_failure_predictor.py
deleted file mode 100644 (file)
index bf9b0d7..0000000
+++ /dev/null
@@ -1,265 +0,0 @@
-"""Sample code for disk failure prediction.
-
-This sample code is a community version for anyone who is interested in Machine
-Learning and care about disk failure.
-
-This class provides a disk failure prediction module. Given models dirpath to
-initialize a predictor instance and then use 6 days data to predict. Predict
-function will return a string to indicate disk failure status: "Good",
-"Warning", "Bad", or "Unknown".
-
-An example code is as follows:
-
->>> model = disk_failure_predictor.DiskFailurePredictor()
->>> status = model.initialize("./models")
->>> if status:
->>>     model.predict(disk_days)
-'Bad'
-
-
-Provided by ProphetStor Data Services Inc.
-http://www.prophetstor.com/
-
-"""
-
-from __future__ import print_function
-import os
-import json
-import pickle
-
-
-def get_diskfailurepredictor_path():
-    path = os.path.abspath(__file__)
-    dir_path = os.path.dirname(path)
-    return dir_path
-
-
-class DiskFailurePredictor(object):
-    """Disk failure prediction
-
-    This class implements a disk failure prediction module.
-    """
-
-    CONFIG_FILE = "config.json"
-    EXCLUDED_ATTRS = ['smart_9_raw', 'smart_241_raw', 'smart_242_raw']
-
-    def __init__(self):
-        """
-        This function may throw exception due to wrong file operation.
-        """
-
-        self.model_dirpath = ""
-        self.model_context = {}
-
-    def initialize(self, model_dirpath):
-        """
-        Initialize all models.
-
-        Args: None
-
-        Returns:
-            Error message. If all goes well, return an empty string.
-
-        Raises:
-        """
-
-        config_path = os.path.join(model_dirpath, self.CONFIG_FILE)
-        if not os.path.isfile(config_path):
-            return "Missing config file: " + config_path
-        else:
-            with open(config_path) as f_conf:
-                self.model_context = json.load(f_conf)
-
-        for model_name in self.model_context:
-            model_path = os.path.join(model_dirpath, model_name)
-
-            if not os.path.isfile(model_path):
-                return "Missing model file: " + model_path
-
-        self.model_dirpath = model_dirpath
-
-    def __preprocess(self, disk_days):
-        """
-        Preprocess disk attributes.
-
-        Args:
-            disk_days: Refer to function predict(...).
-
-        Returns:
-            new_disk_days: Processed disk days.
-        """
-
-        req_attrs = []
-        new_disk_days = []
-
-        attr_list = set.intersection(*[set(disk_day.keys())
-                                       for disk_day in disk_days])
-        for attr in attr_list:
-            if (attr.startswith('smart_') and attr.endswith('_raw')) and \
-                    attr not in self.EXCLUDED_ATTRS:
-                req_attrs.append(attr)
-
-        for disk_day in disk_days:
-            new_disk_day = {}
-            for attr in req_attrs:
-                if float(disk_day[attr]) >= 0.0:
-                    new_disk_day[attr] = disk_day[attr]
-
-            new_disk_days.append(new_disk_day)
-
-        return new_disk_days
-
-    @staticmethod
-    def __get_diff_attrs(disk_days):
-        """
-        Get 5 days differential attributes.
-
-        Args:
-            disk_days: Refer to function predict(...).
-
-        Returns:
-            attr_list: All S.M.A.R.T. attributes used in given disk. Here we
-                       use intersection set of all disk days.
-
-            diff_disk_days: A list struct comprises 5 dictionaries, each
-                            dictionary contains differential attributes.
-
-        Raises:
-            Exceptions of wrong list/dict operations.
-        """
-
-        all_attrs = [set(disk_day.keys()) for disk_day in disk_days]
-        attr_list = list(set.intersection(*all_attrs))
-        attr_list = disk_days[0].keys()
-        prev_days = disk_days[:-1]
-        curr_days = disk_days[1:]
-        diff_disk_days = []
-
-        for prev, cur in zip(prev_days, curr_days):
-            diff_disk_days.append({attr: (int(cur[attr]) - int(prev[attr]))
-                                   for attr in attr_list})
-
-        return attr_list, diff_disk_days
-
-    def __get_best_models(self, attr_list):
-        """
-        Find the best models from the model list according to the given
-        attribute list.
-
-        Args:
-            attr_list: All S.M.A.R.T. attributes used in the given disk.
-
-        Returns:
-            best_models: A dict that maps each candidate model path to that
-                         model's 'ordered' attribute list, or None if too few
-                         attributes match. Be aware that S.M.A.R.T. attributes
-                         must stay in the model's order.
-        """
-
-        models = self.model_context.keys()
-
-        scores = []
-        for model_name in models:
-            scores.append(sum(attr in attr_list
-                              for attr in self.model_context[model_name]))
-        max_score = max(scores)
-
-        # Skip if too few matched attributes.
-        if max_score < 3:
-            print("Too few matched attributes")
-            return None
-
-        best_models = {}
-        best_model_indices = [idx for idx, score in enumerate(scores)
-                              if score > max_score - 2]
-        for model_idx in best_model_indices:
-            model_name = list(models)[model_idx]
-            model_path = os.path.join(self.model_dirpath, model_name)
-            model_attrlist = self.model_context[model_name]
-            best_models[model_path] = model_attrlist
-
-        return best_models
-
-    @staticmethod
-    def __get_ordered_attrs(disk_days, model_attrlist):
-        """
-        Return ordered attributes of given disk days.
-
-        Args:
-            disk_days: Unordered disk days.
-            model_attrlist: Model's ordered attribute list.
-
-        Returns:
-            ordered_attrs: Ordered disk days.
-
-        Raises: None
-        """
-
-        ordered_attrs = []
-
-        for one_day in disk_days:
-            one_day_attrs = []
-
-            for attr in model_attrlist:
-                if attr in one_day:
-                    one_day_attrs.append(one_day[attr])
-                else:
-                    one_day_attrs.append(0)
-
-            ordered_attrs.append(one_day_attrs)
-
-        return ordered_attrs
-
-    def predict(self, disk_days):
-        """
-        Predict using given 6-days disk S.M.A.R.T. attributes.
-
-        Args:
-            disk_days: A list struct comprises 6 dictionaries. These
-                       dictionaries store 'consecutive' days of disk SMART
-                       attributes.
-        Returns:
-            A string indicates prediction result. One of following four strings
-            will be returned according to disk failure status:
-            (1) Good : Disk is health
-            (2) Warning : Disk has some symptoms but may not fail immediately
-            (3) Bad : Disk is in danger and data backup is highly recommended
-            (4) Unknown : Not enough data for prediction.
-
-        Raises:
-            Pickle exceptions
-        """
-
-        all_pred = []
-
-        proc_disk_days = self.__preprocess(disk_days)
-        attr_list, diff_data = DiskFailurePredictor.__get_diff_attrs(proc_disk_days)
-        modellist = self.__get_best_models(attr_list)
-        if modellist is None:
-            return "Unknown"
-
-        for modelpath in modellist:
-            model_attrlist = modellist[modelpath]
-            ordered_data = DiskFailurePredictor.__get_ordered_attrs(
-                diff_data, model_attrlist)
-
-            try:
-                with open(modelpath, 'rb') as f_model:
-                    clf = pickle.load(f_model)
-
-            except UnicodeDecodeError:
-                # Compatibility for python3
-                with open(modelpath, 'rb') as f_model:
-                    clf = pickle.load(f_model, encoding='latin1')
-
-            pred = clf.predict(ordered_data)
-
-            all_pred.append(1 if any(pred) else 0)
-
-        score = 2 ** sum(all_pred) - len(modellist)
-        if score > 10:
-            return "Bad"
-        if score > 4:
-            return "Warning"
-        return "Good"
diff --git a/src/pybind/mgr/diskprediction/predictor/models/config.json b/src/pybind/mgr/diskprediction/predictor/models/config.json
deleted file mode 100644 (file)
index 9a1485c..0000000
+++ /dev/null
@@ -1,77 +0,0 @@
-{
-"svm_123.pkl": ["smart_197_raw", "smart_183_raw", "smart_200_raw", "smart_194_raw", "smart_254_raw", "smart_252_raw", "smart_4_raw", "smart_222_raw", "smart_187_raw", "smart_184_raw"],
-"svm_105.pkl": ["smart_197_raw", "smart_4_raw", "smart_5_raw", "smart_252_raw", "smart_184_raw", "smart_223_raw", "smart_198_raw", "smart_10_raw", "smart_189_raw", "smart_222_raw"],
-"svm_82.pkl":["smart_184_raw", "smart_2_raw", "smart_187_raw", "smart_225_raw", "smart_198_raw", "smart_197_raw", "smart_4_raw", "smart_13_raw", "smart_188_raw", "smart_251_raw"],
-"svm_186.pkl":["smart_3_raw", "smart_11_raw", "smart_198_raw", "smart_250_raw", "smart_13_raw", "smart_200_raw", "smart_224_raw", "smart_187_raw", "smart_22_raw", "smart_4_raw", "smart_220_raw"],
-"svm_14.pkl":["smart_12_raw", "smart_226_raw", "smart_187_raw", "smart_196_raw", "smart_5_raw", "smart_183_raw", "smart_255_raw", "smart_250_raw", "smart_201_raw", "smart_8_raw"],
-"svm_10.pkl":["smart_251_raw", "smart_4_raw", "smart_223_raw", "smart_13_raw", "smart_255_raw", "smart_188_raw", "smart_197_raw", "smart_201_raw", "smart_250_raw", "smart_15_raw"],
-"svm_235.pkl":["smart_15_raw", "smart_255_raw", "smart_252_raw", "smart_197_raw", "smart_250_raw", "smart_254_raw", "smart_13_raw", "smart_251_raw", "smart_198_raw", "smart_189_raw", "smart_191_raw"],
-"svm_234.pkl":["smart_187_raw", "smart_183_raw", "smart_3_raw", "smart_4_raw", "smart_222_raw", "smart_184_raw", "smart_5_raw", "smart_198_raw", "smart_200_raw", "smart_8_raw", "smart_10_raw"],
-"svm_119.pkl":["smart_254_raw", "smart_8_raw", "smart_183_raw", "smart_184_raw", "smart_195_raw", "smart_252_raw", "smart_191_raw", "smart_10_raw", "smart_200_raw", "smart_197_raw"],
-"svm_227.pkl":["smart_254_raw", "smart_189_raw", "smart_225_raw", "smart_224_raw", "smart_197_raw", "smart_223_raw", "smart_4_raw", "smart_183_raw", "smart_11_raw", "smart_184_raw", "smart_13_raw"],
-"svm_18.pkl":["smart_197_raw", "smart_3_raw", "smart_220_raw", "smart_193_raw", "smart_10_raw", "smart_187_raw", "smart_188_raw", "smart_225_raw", "smart_194_raw", "smart_13_raw"],
-"svm_78.pkl":["smart_10_raw", "smart_183_raw", "smart_191_raw", "smart_13_raw", "smart_198_raw", "smart_22_raw", "smart_195_raw", "smart_12_raw", "smart_224_raw", "smart_200_raw"],
-"svm_239.pkl":["smart_3_raw", "smart_254_raw", "smart_199_raw", "smart_225_raw", "smart_187_raw", "smart_195_raw", "smart_197_raw", "smart_2_raw", "smart_193_raw", "smart_220_raw", "smart_183_raw"],
-"svm_174.pkl":["smart_183_raw", "smart_196_raw", "smart_225_raw", "smart_189_raw", "smart_4_raw", "smart_3_raw", "smart_9_raw", "smart_198_raw", "smart_15_raw", "smart_5_raw", "smart_194_raw"],
-"svm_104.pkl":["smart_12_raw", "smart_198_raw", "smart_197_raw", "smart_4_raw", "smart_240_raw", "smart_187_raw", "smart_225_raw", "smart_8_raw", "smart_3_raw", "smart_2_raw"],
-"svm_12.pkl":["smart_222_raw", "smart_251_raw", "smart_194_raw", "smart_9_raw", "smart_184_raw", "smart_191_raw", "smart_187_raw", "smart_255_raw", "smart_4_raw", "smart_11_raw"],
-"svm_97.pkl":["smart_15_raw", "smart_197_raw", "smart_190_raw", "smart_199_raw", "smart_200_raw", "smart_12_raw", "smart_191_raw", "smart_254_raw", "smart_194_raw", "smart_201_raw"],
-"svm_118.pkl":["smart_11_raw", "smart_225_raw", "smart_196_raw", "smart_197_raw", "smart_198_raw", "smart_200_raw", "smart_3_raw", "smart_10_raw", "smart_191_raw", "smart_22_raw"],
-"svm_185.pkl":["smart_191_raw", "smart_254_raw", "smart_3_raw", "smart_190_raw", "smart_15_raw", "smart_22_raw", "smart_2_raw", "smart_198_raw", "smart_13_raw", "smart_226_raw", "smart_225_raw"],
-"svm_206.pkl":["smart_183_raw", "smart_192_raw", "smart_197_raw", "smart_255_raw", "smart_187_raw", "smart_254_raw", "smart_198_raw", "smart_13_raw", "smart_226_raw", "smart_240_raw", "smart_8_raw"],
-"svm_225.pkl":["smart_224_raw", "smart_11_raw", "smart_5_raw", "smart_4_raw", "smart_225_raw", "smart_197_raw", "smart_15_raw", "smart_183_raw", "smart_193_raw", "smart_190_raw", "smart_187_raw"],
-"svm_169.pkl":["smart_252_raw", "smart_183_raw", "smart_254_raw", "smart_11_raw", "smart_193_raw", "smart_22_raw", "smart_226_raw", "smart_189_raw", "smart_225_raw", "smart_198_raw", "smart_200_raw"],
-"svm_79.pkl":["smart_184_raw", "smart_196_raw", "smart_4_raw", "smart_226_raw", "smart_199_raw", "smart_187_raw", "smart_193_raw", "smart_188_raw", "smart_12_raw", "smart_250_raw"],
-"svm_69.pkl":["smart_187_raw", "smart_9_raw", "smart_200_raw", "smart_11_raw", "smart_252_raw", "smart_189_raw", "smart_4_raw", "smart_188_raw", "smart_255_raw", "smart_201_raw"],
-"svm_201.pkl":["smart_224_raw", "smart_8_raw", "smart_250_raw", "smart_2_raw", "smart_198_raw", "smart_15_raw", "smart_193_raw", "smart_223_raw", "smart_3_raw", "smart_11_raw", "smart_191_raw"],
-"svm_114.pkl":["smart_226_raw", "smart_188_raw", "smart_2_raw", "smart_11_raw", "smart_4_raw", "smart_193_raw", "smart_184_raw", "smart_194_raw", "smart_198_raw", "smart_13_raw"],
-"svm_219.pkl":["smart_12_raw", "smart_22_raw", "smart_8_raw", "smart_191_raw", "smart_197_raw", "smart_254_raw", "smart_15_raw", "smart_193_raw", "smart_199_raw", "smart_225_raw", "smart_192_raw"],
-"svm_168.pkl":["smart_255_raw", "smart_191_raw", "smart_193_raw", "smart_220_raw", "smart_5_raw", "smart_3_raw", "smart_222_raw", "smart_223_raw", "smart_197_raw", "smart_196_raw", "smart_22_raw"],
-"svm_243.pkl":["smart_11_raw", "smart_255_raw", "smart_10_raw", "smart_189_raw", "smart_225_raw", "smart_240_raw", "smart_222_raw", "smart_197_raw", "smart_183_raw", "smart_198_raw", "smart_12_raw"],
-"svm_195.pkl":["smart_183_raw", "smart_5_raw", "smart_11_raw", "smart_197_raw", "smart_15_raw", "smart_9_raw", "smart_4_raw", "smart_220_raw", "smart_12_raw", "smart_192_raw", "smart_240_raw"],
-"svm_222.pkl":["smart_10_raw", "smart_13_raw", "smart_188_raw", "smart_15_raw", "smart_192_raw", "smart_224_raw", "smart_225_raw", "smart_187_raw", "smart_222_raw", "smart_220_raw", "smart_252_raw"],
-"svm_62.pkl":["smart_196_raw", "smart_251_raw", "smart_187_raw", "smart_224_raw", "smart_11_raw", "smart_12_raw", "smart_8_raw", "smart_199_raw", "smart_220_raw", "smart_195_raw"],
-"svm_151.pkl":["smart_187_raw", "smart_223_raw", "smart_200_raw", "smart_189_raw", "smart_251_raw", "smart_255_raw", "smart_222_raw", "smart_192_raw", "smart_12_raw", "smart_183_raw", "smart_22_raw"],
-"svm_125.pkl":["smart_9_raw", "smart_252_raw", "smart_197_raw", "smart_251_raw", "smart_11_raw", "smart_12_raw", "smart_188_raw", "smart_240_raw", "smart_10_raw", "smart_223_raw"],
-"svm_124.pkl":["smart_193_raw", "smart_187_raw", "smart_183_raw", "smart_11_raw", "smart_10_raw", "smart_8_raw", "smart_194_raw", "smart_189_raw", "smart_222_raw", "smart_191_raw"],
-"svm_67.pkl":["smart_2_raw", "smart_8_raw", "smart_225_raw", "smart_240_raw", "smart_13_raw", "smart_5_raw", "smart_187_raw", "smart_198_raw", "smart_199_raw", "smart_3_raw"],
-"svm_115.pkl":["smart_222_raw", "smart_193_raw", "smart_223_raw", "smart_195_raw", "smart_252_raw", "smart_189_raw", "smart_199_raw", "smart_187_raw", "smart_15_raw", "smart_184_raw"],
-"svm_1.pkl":["smart_201_raw", "smart_8_raw", "smart_200_raw", "smart_252_raw", "smart_251_raw", "smart_187_raw", "smart_9_raw", "smart_188_raw", "smart_15_raw", "smart_184_raw"],
-"svm_112.pkl":["smart_220_raw", "smart_197_raw", "smart_10_raw", "smart_188_raw", "smart_12_raw", "smart_4_raw", "smart_196_raw", "smart_3_raw", "smart_240_raw", "smart_225_raw"],
-"svm_138.pkl":["smart_183_raw", "smart_10_raw", "smart_191_raw", "smart_195_raw", "smart_223_raw", "smart_189_raw", "smart_187_raw", "smart_255_raw", "smart_226_raw", "smart_8_raw"],
-"svm_229.pkl":["smart_224_raw", "smart_8_raw", "smart_192_raw", "smart_220_raw", "smart_195_raw", "smart_183_raw", "smart_250_raw", "smart_187_raw", "smart_225_raw", "smart_4_raw", "smart_252_raw"],
-"svm_145.pkl":["smart_190_raw", "smart_8_raw", "smart_226_raw", "smart_184_raw", "smart_225_raw", "smart_220_raw", "smart_193_raw", "smart_183_raw", "smart_201_raw", "smart_187_raw", "smart_2_raw"],
-"svm_59.pkl":["smart_188_raw", "smart_11_raw", "smart_184_raw", "smart_2_raw", "smart_220_raw", "smart_198_raw", "smart_225_raw", "smart_240_raw", "smart_197_raw", "smart_251_raw"],
-"svm_204.pkl":["smart_15_raw", "smart_240_raw", "smart_225_raw", "smart_223_raw", "smart_252_raw", "smart_22_raw", "smart_200_raw", "smart_13_raw", "smart_220_raw", "smart_198_raw", "smart_191_raw"],
-"svm_88.pkl":["smart_198_raw", "smart_3_raw", "smart_8_raw", "smart_225_raw", "smart_251_raw", "smart_222_raw", "smart_188_raw", "smart_10_raw", "smart_240_raw", "smart_189_raw"],
-"svm_182.pkl":["smart_10_raw", "smart_190_raw", "smart_250_raw", "smart_15_raw", "smart_193_raw", "smart_22_raw", "smart_200_raw", "smart_8_raw", "smart_4_raw", "smart_187_raw", "smart_9_raw"],
-"svm_61.pkl":["smart_5_raw", "smart_12_raw", "smart_9_raw", "smart_198_raw", "smart_195_raw", "smart_252_raw", "smart_15_raw", "smart_240_raw", "smart_255_raw", "smart_224_raw"],
-"svm_50.pkl":["smart_220_raw", "smart_5_raw", "smart_194_raw", "smart_250_raw", "smart_15_raw", "smart_240_raw", "smart_8_raw", "smart_198_raw", "smart_224_raw", "smart_191_raw"],
-"svm_210.pkl":["smart_8_raw", "smart_15_raw", "smart_195_raw", "smart_224_raw", "smart_5_raw", "smart_191_raw", "smart_198_raw", "smart_225_raw", "smart_200_raw", "smart_251_raw", "smart_240_raw"],
-"svm_16.pkl":["smart_222_raw", "smart_10_raw", "smart_250_raw", "smart_189_raw", "smart_191_raw", "smart_2_raw", "smart_5_raw", "smart_193_raw", "smart_9_raw", "smart_187_raw"],
-"svm_85.pkl":["smart_252_raw", "smart_184_raw", "smart_9_raw", "smart_5_raw", "smart_254_raw", "smart_3_raw", "smart_195_raw", "smart_10_raw", "smart_12_raw", "smart_222_raw"],
-"svm_36.pkl":["smart_201_raw", "smart_251_raw", "smart_184_raw", "smart_3_raw", "smart_5_raw", "smart_183_raw", "smart_194_raw", "smart_195_raw", "smart_224_raw", "smart_2_raw"],
-"svm_33.pkl":["smart_223_raw", "smart_254_raw", "smart_225_raw", "smart_9_raw", "smart_199_raw", "smart_5_raw", "smart_189_raw", "smart_194_raw", "smart_240_raw", "smart_4_raw"],
-"svm_3.pkl":["smart_225_raw", "smart_194_raw", "smart_3_raw", "smart_189_raw", "smart_9_raw", "smart_254_raw", "smart_240_raw", "smart_5_raw", "smart_255_raw", "smart_223_raw"],
-"svm_93.pkl":["smart_8_raw", "smart_188_raw", "smart_5_raw", "smart_10_raw", "smart_222_raw", "smart_2_raw", "smart_254_raw", "smart_12_raw", "smart_193_raw", "smart_224_raw"],
-"svm_120.pkl":["smart_189_raw", "smart_224_raw", "smart_222_raw", "smart_193_raw", "smart_5_raw", "smart_201_raw", "smart_8_raw", "smart_254_raw", "smart_194_raw", "smart_22_raw"],
-"svm_128.pkl":["smart_195_raw", "smart_184_raw", "smart_251_raw", "smart_8_raw", "smart_5_raw", "smart_196_raw", "smart_10_raw", "smart_4_raw", "smart_225_raw", "smart_191_raw"],
-"svm_212.pkl":["smart_225_raw", "smart_192_raw", "smart_10_raw", "smart_12_raw", "smart_222_raw", "smart_184_raw", "smart_13_raw", "smart_226_raw", "smart_5_raw", "smart_201_raw", "smart_22_raw"],
-"svm_221.pkl":["smart_255_raw", "smart_2_raw", "smart_224_raw", "smart_192_raw", "smart_252_raw", "smart_13_raw", "smart_183_raw", "smart_193_raw", "smart_15_raw", "smart_199_raw", "smart_200_raw"],
-"svm_223.pkl":["smart_4_raw", "smart_194_raw", "smart_9_raw", "smart_255_raw", "smart_188_raw", "smart_201_raw", "smart_3_raw", "smart_226_raw", "smart_192_raw", "smart_251_raw", "smart_191_raw"],
-"svm_44.pkl":["smart_255_raw", "smart_11_raw", "smart_200_raw", "smart_3_raw", "smart_195_raw", "smart_201_raw", "smart_4_raw", "smart_5_raw", "smart_10_raw", "smart_191_raw"],
-"svm_213.pkl":["smart_22_raw", "smart_191_raw", "smart_183_raw", "smart_4_raw", "smart_194_raw", "smart_255_raw", "smart_254_raw", "smart_193_raw", "smart_11_raw", "smart_10_raw", "smart_220_raw"],
-"svm_131.pkl":["smart_22_raw", "smart_194_raw", "smart_184_raw", "smart_250_raw", "smart_10_raw", "smart_189_raw", "smart_183_raw", "smart_240_raw", "smart_12_raw", "smart_252_raw"],
-"svm_6.pkl":["smart_194_raw", "smart_250_raw", "smart_223_raw", "smart_224_raw", "smart_184_raw", "smart_191_raw", "smart_201_raw", "smart_9_raw", "smart_252_raw", "smart_3_raw"],
-"svm_161.pkl":["smart_255_raw", "smart_222_raw", "smart_226_raw", "smart_254_raw", "smart_183_raw", "smart_22_raw", "smart_12_raw", "smart_190_raw", "smart_11_raw", "smart_192_raw", "smart_251_raw"],
-"svm_72.pkl":["smart_13_raw", "smart_184_raw", "smart_223_raw", "smart_240_raw", "smart_250_raw", "smart_251_raw", "smart_201_raw", "smart_196_raw", "smart_5_raw", "smart_4_raw"],
-"svm_27.pkl":["smart_189_raw", "smart_188_raw", "smart_255_raw", "smart_251_raw", "smart_240_raw", "smart_15_raw", "smart_9_raw", "smart_191_raw", "smart_226_raw", "smart_10_raw"],
-"svm_141.pkl":["smart_9_raw", "smart_191_raw", "smart_2_raw", "smart_226_raw", "smart_13_raw", "smart_22_raw", "smart_193_raw", "smart_222_raw", "smart_220_raw", "smart_225_raw", "smart_3_raw"],
-"svm_57.pkl":["smart_12_raw", "smart_252_raw", "smart_190_raw", "smart_226_raw", "smart_10_raw", "smart_189_raw", "smart_193_raw", "smart_2_raw", "smart_9_raw", "smart_223_raw"],
-"svm_236.pkl":["smart_200_raw", "smart_189_raw", "smart_226_raw", "smart_252_raw", "smart_250_raw", "smart_193_raw", "smart_13_raw", "smart_2_raw", "smart_254_raw", "smart_22_raw", "smart_9_raww"],
-"svm_208.pkl":["smart_223_raw", "smart_15_raw", "smart_251_raw", "smart_5_raw", "smart_198_raw", "smart_252_raw", "smart_4_raw", "smart_8_raw", "smart_220_raw", "smart_254_raw", "smart_193_raw"],
-"svm_230.pkl":["smart_184_raw", "smart_5_raw", "smart_191_raw", "smart_198_raw", "smart_11_raw", "smart_255_raw", "smart_189_raw", "smart_254_raw", "smart_196_raw", "smart_199_raw", "smart_223_raw"],
-"svm_134.pkl":["smart_8_raw", "smart_194_raw", "smart_4_raw", "smart_189_raw", "smart_223_raw", "smart_5_raw", "smart_187_raw", "smart_9_raw", "smart_192_raw", "smart_220_raw"],
-"svm_71.pkl":["smart_220_raw", "smart_13_raw", "smart_194_raw", "smart_197_raw", "smart_192_raw", "smart_22_raw", "smart_184_raw", "smart_199_raw", "smart_222_raw", "smart_183_raw"],
-"svm_109.pkl":["smart_224_raw", "smart_252_raw", "smart_2_raw", "smart_200_raw", "smart_5_raw", "smart_194_raw", "smart_222_raw", "smart_198_raw", "smart_4_raw", "smart_13_raw"]
-}
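
Each entry in the config.json above maps one pickled SVM to the ordered list of S.M.A.R.T. attributes it was trained on; __get_best_models counts how many of a disk's observed attributes each model covers, requires at least 3 matches, and keeps every model within 2 of the best score. A sketch of that selection step, with truncated, illustrative attribute lists:

    # Sketch of config.json-driven model selection (attribute lists shortened).
    model_context = {
        "svm_1.pkl": ["smart_201_raw", "smart_8_raw", "smart_200_raw"],
        "svm_3.pkl": ["smart_225_raw", "smart_194_raw", "smart_3_raw"],
    }
    disk_attrs = {"smart_201_raw", "smart_8_raw", "smart_194_raw"}

    scores = {name: sum(attr in disk_attrs for attr in attrs)
              for name, attrs in model_context.items()}
    best = max(scores.values())
    # The real code returns None when best < 3; otherwise it keeps every
    # model whose score is within 2 of the best.
    candidates = [name for name, s in scores.items() if s > best - 2]
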
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl
deleted file mode 100644 (file)
index 5eb30f3..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_1.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl
deleted file mode 100644 (file)
index 9259c1e..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_10.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl
deleted file mode 100644 (file)
index d5d5cf5..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_104.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl
deleted file mode 100644 (file)
index 4aadc3c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_105.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl
deleted file mode 100644 (file)
index c99c353..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_109.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl
deleted file mode 100644 (file)
index 367a330..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_112.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl
deleted file mode 100644 (file)
index 946d5ce..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_114.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl
deleted file mode 100644 (file)
index ff83492..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_115.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl
deleted file mode 100644 (file)
index eec8689..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_118.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl
deleted file mode 100644 (file)
index 6a26c05..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_119.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl
deleted file mode 100644 (file)
index 5cbe977..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_12.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl
deleted file mode 100644 (file)
index d2041c2..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_120.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl
deleted file mode 100644 (file)
index 0ab6187..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_123.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl
deleted file mode 100644 (file)
index 8f9ea4e..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_124.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl
deleted file mode 100644 (file)
index 4d49900..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_125.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl
deleted file mode 100644 (file)
index 6a18726..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_128.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl
deleted file mode 100644 (file)
index e6a55dc..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_131.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl
deleted file mode 100644 (file)
index 51171e0..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_134.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl
deleted file mode 100644 (file)
index bc98e0c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_138.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl
deleted file mode 100644 (file)
index c4547dc..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_14.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl
deleted file mode 100644 (file)
index 86d9f38..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_141.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl
deleted file mode 100644 (file)
index 24ff962..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_145.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl
deleted file mode 100644 (file)
index 92bfd3f..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_151.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl
deleted file mode 100644 (file)
index 11664b3..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_16.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl
deleted file mode 100644 (file)
index 2d42168..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_161.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl
deleted file mode 100644 (file)
index 12a811c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_168.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl
deleted file mode 100644 (file)
index 0c51446..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_169.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl
deleted file mode 100644 (file)
index d2945ce..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_174.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl
deleted file mode 100644 (file)
index d05520c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_18.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl
deleted file mode 100644 (file)
index 7fcfb3c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_182.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl
deleted file mode 100644 (file)
index 785301c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_185.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl
deleted file mode 100644 (file)
index 4ea83da..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_186.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl
deleted file mode 100644 (file)
index 12273f7..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_195.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl
deleted file mode 100644 (file)
index c866cf0..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_201.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl
deleted file mode 100644 (file)
index 8cf1c3a..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_204.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl
deleted file mode 100644 (file)
index cba64e8..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_206.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl
deleted file mode 100644 (file)
index ba0df0a..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_208.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl
deleted file mode 100644 (file)
index 6b5bee2..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_210.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl
deleted file mode 100644 (file)
index 11eafc6..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_212.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl
deleted file mode 100644 (file)
index 0b8475c..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_213.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl
deleted file mode 100644 (file)
index 4a248c1..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_219.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl
deleted file mode 100644 (file)
index e37c6b4..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_221.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl
deleted file mode 100644 (file)
index e543038..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_222.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl
deleted file mode 100644 (file)
index 8b208f4..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_223.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl
deleted file mode 100644 (file)
index 3f2b629..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_225.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl
deleted file mode 100644 (file)
index 5e4fb56..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_227.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl
deleted file mode 100644 (file)
index 1e9c335..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_229.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl
deleted file mode 100644 (file)
index 36f8205..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_230.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl
deleted file mode 100644 (file)
index 199f9ba..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_234.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl
deleted file mode 100644 (file)
index d986526..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_235.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl
deleted file mode 100644 (file)
index 160e22f..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_236.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl
deleted file mode 100644 (file)
index 8d98572..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_239.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl
deleted file mode 100644 (file)
index 4fca95e..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_243.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl
deleted file mode 100644 (file)
index 011974e..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_27.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl
deleted file mode 100644 (file)
index e5e97a8..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_3.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl
deleted file mode 100644 (file)
index e709d7b..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_33.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl
deleted file mode 100644 (file)
index 3d87b8b..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_36.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl
deleted file mode 100644 (file)
index 9abcece..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_44.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl
deleted file mode 100644 (file)
index b7ce5ed..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_50.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl
deleted file mode 100644 (file)
index fe78328..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_57.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl
deleted file mode 100644 (file)
index 7621777..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_59.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl
deleted file mode 100644 (file)
index 4fb09d3..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_6.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl
deleted file mode 100644 (file)
index 319fc5f..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_61.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl
deleted file mode 100644 (file)
index 25b21ae..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_62.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl
deleted file mode 100644 (file)
index 1e6e738..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_67.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl
deleted file mode 100644 (file)
index 22d349a..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_69.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl
deleted file mode 100644 (file)
index e0760ad..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_71.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl
deleted file mode 100644 (file)
index 5096aa8..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_72.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl
deleted file mode 100644 (file)
index 7958f3b..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_78.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl
deleted file mode 100644 (file)
index 2ed3a0f..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_79.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl
deleted file mode 100644 (file)
index 2e18840..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_82.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl
deleted file mode 100644 (file)
index 88161af..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_85.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl
deleted file mode 100644 (file)
index 7156339..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_88.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl
deleted file mode 100644 (file)
index 703429f..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_93.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl b/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl
deleted file mode 100644 (file)
index 9653d20..0000000
Binary files a/src/pybind/mgr/diskprediction/predictor/models/svm_97.pkl and /dev/null differ
diff --git a/src/pybind/mgr/diskprediction/requirements.txt b/src/pybind/mgr/diskprediction/requirements.txt
deleted file mode 100644 (file)
index 3a22a07..0000000
+++ /dev/null
@@ -1,14 +0,0 @@
-google==2.0.1
-google-api-python-client==1.7.3
-google-auth==1.5.0
-google-auth-httplib2==0.0.3
-google-gax==0.12.5
-googleapis-common-protos==1.5.3
-grpc==0.3.post19
-grpc-google-logging-v2==0.8.1
-grpc-google-pubsub-v1==0.8.1
-grpcio==1.14.1
-mock==2.0.0
-numpy==1.15.1
-scikit-learn==0.19.2
-scipy==1.1.0
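
These pins serve both back ends of the module: the google-/grpc-family packages back the cloud gRPC client, while numpy, scipy, and scikit-learn load and run the pickled SVM models locally. For a manual setup against a source checkout, something like the following should work (the path assumes the pre-split tree shown in this commit):

    pip install -r src/pybind/mgr/diskprediction/requirements.txt
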
diff --git a/src/pybind/mgr/diskprediction/task.py b/src/pybind/mgr/diskprediction/task.py
deleted file mode 100644 (file)
index 914ea59..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-from __future__ import absolute_import
-
-import time
-from threading import Event, Thread
-
-from .agent.metrics.ceph_cluster import CephClusterAgent
-from .agent.metrics.ceph_mon_osd import CephMonOsdAgent
-from .agent.metrics.ceph_pool import CephPoolAgent
-from .agent.metrics.db_relay import DBRelayAgent
-from .agent.metrics.sai_agent import SAIAgent
-from .agent.metrics.sai_cluster import SAICluserAgent
-from .agent.metrics.sai_disk import SAIDiskAgent
-from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent
-from .agent.metrics.sai_host import SAIHostAgent
-from .agent.predict.prediction import PredictionAgent
-from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING
-
-
-class AgentRunner(Thread):
-
-    task_name = ''
-    interval_key = ''
-    agents = []
-
-    def __init__(self, mgr_module, agent_timeout=60):
-        """
-
-        :param mgr_module: parent ceph mgr module
-        :param agent_timeout: (unit: seconds) agent execution timeout, default: 60 secs
-        """
-        Thread.__init__(self)
-        self._agent_timeout = agent_timeout
-        self._module_inst = mgr_module
-        self._log = mgr_module.log
-        self._obj_sender = None
-        self._start_time = None
-        self._th = None
-
-        self.exit = False
-        self.event = Event()
-        self.task_interval = \
-            int(self._module_inst.get_configuration(self.interval_key))
-
-    def terminate(self):
-        self.exit = True
-        self.event.set()
-        self._log.info('PDS terminate %s complete' % self.task_name)
-
-    def run(self):
-        self._start_time = time.time()
-        self._log.debug(
-            'start %s, interval: %s'
-            % (self.task_name, self.task_interval))
-        while not self.exit:
-            self.run_agents()
-            if self.event:
-                self.event.wait(int(self.task_interval))
-                self.event.clear()
-                self._log.info(
-                    'completed %s (%s)' % (self.task_name, time.time() - self._start_time))
-
-    def run_agents(self):
-        try:
-            self._log.debug('run_agents %s' % self.task_name)
-            mode = self._module_inst.get_configuration('diskprediction_config_mode')
-            if mode.lower() == 'cloud':
-                from .common.grpcclient import GRPcClient, gen_configuration
-                conf = gen_configuration(
-                    host=self._module_inst.get_configuration('diskprediction_server'),
-                    user=self._module_inst.get_configuration('diskprediction_user'),
-                    password=self._module_inst.get_configuration(
-                        'diskprediction_password'),
-                    port=self._module_inst.get_configuration('diskprediction_port'),
-                    cert_context=self._module_inst.get_configuration('diskprediction_cert_context'),
-                    mgr_inst=self._module_inst,
-                    ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'),
-                    default_authority=self._module_inst.get_configuration('diskprediction_default_authority'))
-                self._obj_sender = GRPcClient(conf)
-            else:
-                from .common.localpredictor import LocalPredictor, gen_configuration
-                conf = gen_configuration(mgr_inst=self._module_inst)
-                self._obj_sender = LocalPredictor(conf)
-            if not self._obj_sender:
-                self._log.error('invalid diskprediction sender')
-                self._module_inst.status = \
-                    {'status': DP_MGR_STAT_FAILED,
-                     'reason': 'invalid diskprediction sender'}
-                return
-            if self._obj_sender.test_connection():
-                self._module_inst.status = {'status': DP_MGR_STAT_OK}
-                self._log.debug('succeeded to test connection')
-                self._run()
-            else:
-                self._log.error('failed to test connection')
-                self._module_inst.status = \
-                    {'status': DP_MGR_STAT_FAILED,
-                     'reason': 'failed to test connection'}
-        except Exception as e:
-            self._module_inst.status = \
-                {'status': DP_MGR_STAT_FAILED,
-                 'reason': 'failed to start %s agents, %s'
-                           % (self.task_name, str(e))}
-            self._log.error(
-                'failed to start %s agents, %s' % (self.task_name, str(e)))
-
-    def _run(self):
-        self._log.debug('%s run' % self.task_name)
-        for agent in self.agents:
-            retry_count = 3
-            while retry_count:
-                retry_count -= 1
-                try:
-                    obj_agent = agent(
-                        self._module_inst, self._obj_sender,
-                        self._agent_timeout)
-                    obj_agent.run()
-                    break
-                except Exception as e:
-                    if str(e).find('configuring') >= 0:
-                        self._log.debug(
-                            'failed to execute {}, {}, retry again.'.format(
-                                agent.measurement, str(e)))
-                        time.sleep(1)
-                        continue
-                    else:
-                        self._module_inst.status = \
-                            {'status': DP_MGR_STAT_WARNING,
-                             'reason': 'failed to execute {}, {}'.format(
-                                agent.measurement, ';'.join(str(e).split('\n\t')))}
-                        self._log.warning(
-                            'failed to execute {}, {}'.format(
-                                agent.measurement, ';'.join(str(e).split('\n\t'))))
-                        break
-
-
-class MetricsRunner(AgentRunner):
-
-    task_name = 'Metrics Agent'
-    interval_key = 'diskprediction_upload_metrics_interval'
-    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,
-              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,
-              SAIAgent]
-
-
-class PredictionRunner(AgentRunner):
-
-    task_name = 'Prediction Agent'
-    interval_key = 'diskprediction_retrieve_prediction_interval'
-    agents = [PredictionAgent]
-
-
-class SmartRunner(AgentRunner):
-
-    task_name = 'Smart data Agent'
-    interval_key = 'diskprediction_upload_smart_interval'
-    agents = [SAIDiskSmartAgent]
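
AgentRunner above uses its Event both as an interruptible sleep (event.wait(task_interval)) and as a wake-up signal: the debug commands in module.py set the event to force an immediate cycle, and terminate() sets it so a sleeping runner exits promptly. A self-contained sketch of that wait/wake pattern, with nothing Ceph-specific:

    import threading

    class PeriodicRunner(threading.Thread):
        """Minimal sketch of the AgentRunner wait/wake pattern."""

        def __init__(self, interval, work):
            super(PeriodicRunner, self).__init__()
            self.interval = interval
            self.work = work
            self.exit = False
            self.event = threading.Event()

        def run(self):
            while not self.exit:
                self.work()
                # Sleeps up to `interval` seconds, but returns immediately
                # when another thread calls self.event.set().
                self.event.wait(self.interval)
                self.event.clear()

        def terminate(self):
            self.exit = True
            self.event.set()  # wake the thread so it can exit promptly

    runner = PeriodicRunner(60, lambda: None)
    runner.start()
    runner.event.set()  # force an immediate cycle, as the debug commands do
    runner.terminate()
    runner.join(5)
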
diff --git a/src/pybind/mgr/diskprediction/test/__init__.py b/src/pybind/mgr/diskprediction/test/__init__.py
deleted file mode 100644 (file)
index 1f19be5..0000000
+++ /dev/null
@@ -1 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
diff --git a/src/pybind/mgr/diskprediction/test/test_agents.py b/src/pybind/mgr/diskprediction/test/test_agents.py
deleted file mode 100644 (file)
index 55999db..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-
-import time
-import mock
-
-from ..agent.metrics.ceph_cluster import CephClusterAgent
-from ..agent.metrics.ceph_mon_osd import CephMonOsdAgent
-from ..agent.metrics.ceph_pool import CephPoolAgent
-from ..agent.metrics.db_relay import DBRelayAgent
-from ..agent.metrics.sai_agent import SAIAgent
-from ..agent.metrics.sai_cluster import SAICluserAgent
-from ..agent.metrics.sai_disk import SAIDiskAgent
-from ..agent.metrics.sai_disk_smart import SAIDiskSmartAgent
-from ..agent.metrics.sai_host import SAIHostAgent
-from ..agent.predict.prediction import PredictionAgent
-from ..common import DummyResonse
-
-TEMP_RESPONSE = {
-    "disk_domain_id": 'abc',
-    "near_failure": 'Good',
-    "predicted": int(time.time() * (1000 ** 3))}
-
-def generate_sender_mock():
-    sender_mock = mock.MagicMock()
-    sender = sender_mock
-    status_info = dict()
-    status_info['measurement'] = None
-    status_info['success_count'] = 1
-    status_info['failure_count'] = 0
-    sender_mock.send_info.return_value = status_info
-
-    query_value = DummyResonse()
-    query_value.status_code = 200
-    query_value.resp_json = TEMP_RESPONSE
-    sender_mock.query_info.return_value = query_value
-    return sender
-
-
-def test_agents(mgr_inst, sender=None):
-    if sender is None:
-        sender = generate_sender_mock()
-
-    metrics_agents = \
-        [CephClusterAgent, CephMonOsdAgent, CephPoolAgent, DBRelayAgent,
-         SAIAgent, SAICluserAgent, SAIDiskAgent, SAIDiskSmartAgent,
-         SAIHostAgent, PredictionAgent]
-    for agent in metrics_agents:
-        obj_agent = agent(mgr_inst, sender)
-        obj_agent.run()
diff --git a/src/pybind/mgr/diskprediction_cloud/__init__.py b/src/pybind/mgr/diskprediction_cloud/__init__.py
new file mode 100644 (file)
index 0000000..8f210ac
--- /dev/null
@@ -0,0 +1 @@
+from .module import Module
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py
new file mode 100644 (file)
index 0000000..c7702e5
--- /dev/null
@@ -0,0 +1,38 @@
+from __future__ import absolute_import\r
+\r
+from ..common import timeout, TimeoutError\r
+\r
+\r
+class BaseAgent(object):\r
+\r
+    measurement = ''\r
+\r
+    def __init__(self, mgr_module, obj_sender, timeout=30):\r
+        self.data = []\r
+        self._client = obj_sender\r
+        self._logger = mgr_module.log\r
+        self._module_inst = mgr_module\r
+        self._timeout = timeout\r
+\r
+    def run(self):\r
+        try:\r
+            self._collect_data()\r
+            self._run()\r
+        except TimeoutError:\r
+            self._logger.error('{} failed to execute {} task'.format(\r
+                __name__, self.measurement))\r
+\r
+    def __nonzero__(self):\r
+        if not self._module_inst:\r
+            return False\r
+        else:\r
+            return True\r
+\r
+    @timeout()\r
+    def _run(self):\r
+        pass\r
+\r
+    @timeout()\r
+    def _collect_data(self):\r
+        pass\r
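BaseAgent wraps both _run and _collect_data in a timeout decorator imported from diskprediction_cloud/common, which is outside this hunk. A rough sketch of what a compatible decorator could look like, assuming a Unix main thread (SIGALRM) and a fixed default interval; the real implementation may instead honor the per-instance self._timeout:

    import signal
    from functools import wraps

    class TimeoutError(Exception):
        pass

    def timeout(seconds=30):
        def decorator(func):
            @wraps(func)
            def wrapper(*args, **kwargs):
                def _handler(signum, frame):
                    raise TimeoutError('%s timed out after %ss' % (func.__name__, seconds))
                old_handler = signal.signal(signal.SIGALRM, _handler)
                signal.alarm(seconds)
                try:
                    return func(*args, **kwargs)
                finally:
                    signal.alarm(0)  # always cancel the pending alarm
                    signal.signal(signal.SIGALRM, old_handler)
            return wrapper
        return decorator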
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py
new file mode 100644 (file)
index 0000000..9e7e5b0
--- /dev/null
@@ -0,0 +1,61 @@
+from __future__ import absolute_import\r
+\r
+from .. import BaseAgent\r
+from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK\r
+\r
+AGENT_VERSION = '1.0.0'\r
+\r
+\r
+class MetricsField(object):\r
+    def __init__(self):\r
+        self.tags = {}\r
+        self.fields = {}\r
+        self.timestamp = None\r
+\r
+    def __str__(self):\r
+        return str({\r
+            'tags': self.tags,\r
+            'fields': self.fields,\r
+            'timestamp': self.timestamp\r
+        })\r
+\r
+\r
+class MetricsAgent(BaseAgent):\r
+\r
+    def log_summary(self, status_info):\r
+        try:\r
+            if status_info:\r
+                measurement = status_info['measurement']\r
+                success_count = status_info['success_count']\r
+                failure_count = status_info['failure_count']\r
+                total_count = success_count + failure_count\r
+                display_string = \\r
+                    '%s agent stats: total count %s, success count %s, failure count %s.'\r
+                self._logger.info(\r
+                    display_string % (measurement, total_count, success_count, failure_count)\r
+                )\r
+        except Exception as e:\r
+            self._logger.error(str(e))\r
+\r
+    def _run(self):\r
+        collect_data = self.data\r
+        result = {}\r
+        if collect_data and self._client:\r
+            status_info = self._client.send_info(collect_data, self.measurement)\r
+            # show summary info\r
+            self.log_summary(status_info)\r
+            # write sub_agent buffer\r
+            total_count = status_info['success_count'] + status_info['failure_count']\r
+            if total_count:\r
+                if status_info['success_count'] == 0:\r
+                    self._module_inst.status = \\r
+                        {'status': DP_MGR_STAT_FAILED,\r
+                         'reason': 'failed to send metrics data to the server'}\r
+                elif status_info['failure_count'] == 0:\r
+                    self._module_inst.status = \\r
+                        {'status': DP_MGR_STAT_OK}\r
+                else:\r
+                    self._module_inst.status = \\r
+                        {'status': DP_MGR_STAT_WARNING,\r
+                         'reason': 'failed to send some of the metrics data to the server'}\r
+        return result\r
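The contract for subclasses is small: _collect_data fills self.data with MetricsField records, and the inherited run() (from BaseAgent) then invokes this _run to push them through the sender and fold the success/failure counts into the module status. A hypothetical subclass; example_metric is an invented measurement name, not one of the plugin's:

    class ExampleAgent(MetricsAgent):
        measurement = 'example_metric'

        def _collect_data(self):
            # Populate self.data; MetricsAgent._run handles sending and status.
            point = MetricsField()
            point.tags['cluster_id'] = 'demo-cluster'
            point.fields['value'] = 42
            self.data.append(point)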
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py
new file mode 100644 (file)
index 0000000..4477802
--- /dev/null
@@ -0,0 +1,146 @@
+from __future__ import absolute_import\r
+\r
+import socket\r
+\r
+from . import MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class CephCluster(MetricsField):\r
+    """ Ceph cluster structure """\r
+    measurement = 'ceph_cluster'\r
+\r
+    def __init__(self):\r
+        super(CephCluster, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.fields['cluster_health'] = ''\r
+        self.fields['num_mon'] = None\r
+        self.fields['num_mon_quorum'] = None\r
+        self.fields['num_osd'] = None\r
+        self.fields['num_osd_up'] = None\r
+        self.fields['num_osd_in'] = None\r
+        self.fields['osd_epoch'] = None\r
+        self.fields['osd_bytes'] = None\r
+        self.fields['osd_bytes_used'] = None\r
+        self.fields['osd_bytes_avail'] = None\r
+        self.fields['num_pool'] = None\r
+        self.fields['num_pg'] = None\r
+        self.fields['num_pg_active_clean'] = None\r
+        self.fields['num_pg_active'] = None\r
+        self.fields['num_pg_peering'] = None\r
+        self.fields['num_object'] = None\r
+        self.fields['num_object_degraded'] = None\r
+        self.fields['num_object_misplaced'] = None\r
+        self.fields['num_object_unfound'] = None\r
+        self.fields['num_bytes'] = None\r
+        self.fields['num_mds_up'] = None\r
+        self.fields['num_mds_in'] = None\r
+        self.fields['num_mds_failed'] = None\r
+        self.fields['mds_epoch'] = None\r
+\r
+\r
+class CephClusterAgent(MetricsAgent):\r
+    measurement = 'ceph_cluster'\r
+\r
+    def _collect_data(self):\r
+        # process data and save to 'self.data'\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        cluster_id = obj_api.get_cluster_id()\r
+\r
+        c_data = CephCluster()\r
+        cluster_state = obj_api.get_health_status()\r
+        c_data.tags['cluster_id'] = cluster_id\r
+        c_data.fields['cluster_health'] = str(cluster_state)\r
+        c_data.fields['agenthost'] = socket.gethostname()\r
+        c_data.tags['agenthost_domain_id'] = \\r
+            '%s_%s' % (cluster_id, c_data.fields['agenthost'])\r
+        c_data.fields['osd_epoch'] = obj_api.get_osd_epoch()\r
+        c_data.fields['num_mon'] = len(obj_api.get_mons())\r
+        c_data.fields['num_mon_quorum'] = \\r
+            len(obj_api.get_mon_status().get('quorum', []))\r
+\r
+        osds = obj_api.get_osds()\r
+        num_osd_up = 0\r
+        num_osd_in = 0\r
+        for osd_data in osds:\r
+            if osd_data.get('up'):\r
+                num_osd_up = num_osd_up + 1\r
+            if osd_data.get('in'):\r
+                num_osd_in = num_osd_in + 1\r
+        if osds:\r
+            c_data.fields['num_osd'] = len(osds)\r
+        else:\r
+            c_data.fields['num_osd'] = 0\r
+        c_data.fields['num_osd_up'] = num_osd_up\r
+        c_data.fields['num_osd_in'] = num_osd_in\r
+        c_data.fields['num_pool'] = len(obj_api.get_osd_pools())\r
+\r
+        df_stats = obj_api.module.get('df').get('stats', {})\r
+        total_bytes = df_stats.get('total_bytes', 0)\r
+        total_used_bytes = df_stats.get('total_used_bytes', 0)\r
+        total_avail_bytes = df_stats.get('total_avail_bytes', 0)\r
+        c_data.fields['osd_bytes'] = total_bytes\r
+        c_data.fields['osd_bytes_used'] = total_used_bytes\r
+        c_data.fields['osd_bytes_avail'] = total_avail_bytes\r
+        if total_bytes:\r
+            c_data.fields['osd_bytes_used_percentage'] = \\r
+                round(float(total_used_bytes) / float(total_bytes) * 100, 4)\r
+        else:\r
+            c_data.fields['osd_bytes_used_percentage'] = 0.0000\r
+\r
+        pg_stats = obj_api.module.get('pg_dump').get('pg_stats', [])\r
+        num_bytes = 0\r
+        num_object = 0\r
+        num_object_degraded = 0\r
+        num_object_misplaced = 0\r
+        num_object_unfound = 0\r
+        num_pg_active = 0\r
+        num_pg_active_clean = 0\r
+        num_pg_peering = 0\r
+        for pg_data in pg_stats:\r
+            num_pg_active = num_pg_active + len(pg_data.get('acting'))\r
+            if 'active+clean' in pg_data.get('state'):\r
+                num_pg_active_clean = num_pg_active_clean + 1\r
+            if 'peering' in pg_data.get('state'):\r
+                num_pg_peering = num_pg_peering + 1\r
+\r
+            stat_sum = pg_data.get('stat_sum', {})\r
+            num_object = num_object + stat_sum.get('num_objects', 0)\r
+            num_object_degraded = \\r
+                num_object_degraded + stat_sum.get('num_objects_degraded', 0)\r
+            num_object_misplaced = \\r
+                num_object_misplaced + stat_sum.get('num_objects_misplaced', 0)\r
+            num_object_unfound = \\r
+                num_object_unfound + stat_sum.get('num_objects_unfound', 0)\r
+            num_bytes = num_bytes + stat_sum.get('num_bytes', 0)\r
+\r
+        c_data.fields['num_pg'] = len(pg_stats)\r
+        c_data.fields['num_object'] = num_object\r
+        c_data.fields['num_object_degraded'] = num_object_degraded\r
+        c_data.fields['num_object_misplaced'] = num_object_misplaced\r
+        c_data.fields['num_object_unfound'] = num_object_unfound\r
+        c_data.fields['num_bytes'] = num_bytes\r
+        c_data.fields['num_pg_active'] = num_pg_active\r
+        c_data.fields['num_pg_active_clean'] = num_pg_active_clean\r
+        c_data.fields['num_pg_peering'] = num_pg_peering\r
+\r
+        filesystems = obj_api.get_file_systems()\r
+        num_mds_in = 0\r
+        num_mds_up = 0\r
+        num_mds_failed = 0\r
+        mds_epoch = 0\r
+        for fs_data in filesystems:\r
+            num_mds_in = \\r
+                num_mds_in + len(fs_data.get('mdsmap', {}).get('in', []))\r
+            num_mds_up = \\r
+                num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {}))\r
+            num_mds_failed = \\r
+                num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', []))\r
+            mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0)\r
+        c_data.fields['num_mds_in'] = num_mds_in\r
+        c_data.fields['num_mds_up'] = num_mds_up\r
+        c_data.fields['num_mds_failed'] = num_mds_failed\r
+        c_data.fields['mds_epoch'] = mds_epoch\r
+        self.data.append(c_data)\r
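The used-capacity percentage above guards against a zero total and rounds to four decimal places; a quick check with illustrative numbers:

    total_bytes = 4 * 1024 ** 4        # 4 TiB raw capacity (made-up figure)
    total_used_bytes = 1024 ** 4       # 1 TiB used
    pct = round(float(total_used_bytes) / float(total_bytes) * 100, 4)
    assert pct == 25.0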
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py
new file mode 100644 (file)
index 0000000..1386186
--- /dev/null
@@ -0,0 +1,224 @@
+from __future__ import absolute_import\r
+\r
+import socket\r
+\r
+from . import MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class CephMON(MetricsField):\r
+    """ Ceph monitor structure """\r
+    measurement = 'ceph_mon'\r
+\r
+    def __init__(self):\r
+        super(CephMON, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.tags['mon_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.fields['num_sessions'] = None\r
+        self.fields['session_add'] = None\r
+        self.fields['session_rm'] = None\r
+        self.fields['session_trim'] = None\r
+        self.fields['num_elections'] = None\r
+        self.fields['election_call'] = None\r
+        self.fields['election_win'] = None\r
+        self.fields['election_lose'] = None\r
+\r
+\r
+class CephErasureProfile(MetricsField):\r
+    """ Ceph osd erasure profile """\r
+    measurement = 'ceph_erasure_profile'\r
+\r
+    def __init__(self):\r
+        super(CephErasureProfile, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['host_domain_id'] = None\r
+        self.fields['name'] = None\r
+\r
+\r
+class CephOsdTree(MetricsField):\r
+    """ Ceph osd tree map """\r
+    measurement = 'ceph_osd_tree'\r
+\r
+    def __init__(self):\r
+        super(CephOsdTree, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['host_domain_id'] = None\r
+        self.fields['name'] = None\r
+\r
+\r
+class CephOSD(MetricsField):\r
+    """ Ceph osd structure """\r
+    measurement = 'ceph_osd'\r
+\r
+    def __init__(self):\r
+        super(CephOSD, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.tags['osd_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['host_domain_id'] = None\r
+        self.fields['op_w'] = None\r
+        self.fields['op_in_bytes'] = None\r
+        self.fields['op_r'] = None\r
+        self.fields['op_out_bytes'] = None\r
+        self.fields['op_wip'] = None\r
+        self.fields['op_latency'] = None\r
+        self.fields['op_process_latency'] = None\r
+        self.fields['op_r_latency'] = None\r
+        self.fields['op_r_process_latency'] = None\r
+        self.fields['op_w_in_bytes'] = None\r
+        self.fields['op_w_latency'] = None\r
+        self.fields['op_w_process_latency'] = None\r
+        self.fields['op_w_prepare_latency'] = None\r
+        self.fields['op_rw'] = None\r
+        self.fields['op_rw_in_bytes'] = None\r
+        self.fields['op_rw_out_bytes'] = None\r
+        self.fields['op_rw_latency'] = None\r
+        self.fields['op_rw_process_latency'] = None\r
+        self.fields['op_rw_prepare_latency'] = None\r
+        self.fields['op_before_queue_op_lat'] = None\r
+        self.fields['op_before_dequeue_op_lat'] = None\r
+\r
+\r
+class CephMonOsdAgent(MetricsAgent):\r
+    measurement = 'ceph_mon_osd'\r
+\r
+    # counter types\r
+    PERFCOUNTER_LONGRUNAVG = 4\r
+    PERFCOUNTER_COUNTER = 8\r
+    PERFCOUNTER_HISTOGRAM = 0x10\r
+    PERFCOUNTER_TYPE_MASK = ~3\r
+\r
+    def _stattype_to_str(self, stattype):\r
+        typeonly = stattype & self.PERFCOUNTER_TYPE_MASK\r
+        if typeonly == 0:\r
+            return 'gauge'\r
+        if typeonly == self.PERFCOUNTER_LONGRUNAVG:\r
+            # this lie matches the DaemonState decoding: only val, no counts\r
+            return 'counter'\r
+        if typeonly == self.PERFCOUNTER_COUNTER:\r
+            return 'counter'\r
+        if typeonly == self.PERFCOUNTER_HISTOGRAM:\r
+            return 'histogram'\r
+        return ''\r
+\r
+    def _generate_osd_erasure_profile(self, cluster_id):\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        osd_map = obj_api.module.get('osd_map')\r
+        if osd_map:\r
+            for n, n_value in osd_map.get('erasure_code_profiles', {}).items():\r
+                e_osd = CephErasureProfile()\r
+                e_osd.fields['name'] = n\r
+                e_osd.tags['cluster_id'] = cluster_id\r
+                e_osd.fields['agenthost'] = socket.gethostname()\r
+                e_osd.tags['agenthost_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
+                e_osd.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
+                for k in n_value.keys():\r
+                    e_osd.fields[k] = str(n_value[k])\r
+                self.data.append(e_osd)\r
+\r
+    def _generate_osd_tree(self, cluster_id):\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        osd_tree = obj_api.module.get('osd_map_tree')\r
+        if osd_tree:\r
+            for node in osd_tree.get('nodes', []):\r
+                n_node = CephOsdTree()\r
+                n_node.tags['cluster_id'] = cluster_id\r
+                n_node.fields['agenthost'] = socket.gethostname()\r
+                n_node.tags['agenthost_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
+                n_node.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
+                n_node.fields['children'] = ','.join(str(x) for x in node.get('children', []))\r
+                n_node.fields['type_id'] = str(node.get('type_id', ''))\r
+                n_node.fields['id'] = str(node.get('id', ''))\r
+                n_node.fields['name'] = str(node.get('name', ''))\r
+                n_node.fields['type'] = str(node.get('type', ''))\r
+                n_node.fields['reweight'] = float(node.get('reweight', 0.0))\r
+                n_node.fields['crush_weight'] = float(node.get('crush_weight', 0.0))\r
+                n_node.fields['primary_affinity'] = float(node.get('primary_affinity', 0.0))\r
+                n_node.fields['device_class'] = str(node.get('device_class', ''))\r
+                self.data.append(n_node)\r
+\r
+    def _generate_osd(self, cluster_id, service_name, perf_counts):\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        service_id = service_name[4:]\r
+        d_osd = CephOSD()\r
+        stat_bytes = 0\r
+        stat_bytes_used = 0\r
+        d_osd.tags['cluster_id'] = cluster_id\r
+        d_osd.tags['osd_id'] = service_name[4:]\r
+        d_osd.fields['agenthost'] = socket.gethostname()\r
+        d_osd.tags['agenthost_domain_id'] = \\r
+            '%s_%s' % (cluster_id, d_osd.fields['agenthost'])\r
+        d_osd.tags['host_domain_id'] = \\r
+            '%s_%s' % (cluster_id,\r
+                       obj_api.get_osd_hostname(d_osd.tags['osd_id']))\r
+\r
+        for i_key, i_val in perf_counts.items():\r
+            if i_key[:4] == 'osd.':\r
+                key_name = i_key[4:]\r
+            else:\r
+                key_name = i_key\r
+            if self._stattype_to_str(i_val['type']) == 'counter':\r
+                value = obj_api.get_rate('osd', service_id, i_key)\r
+            else:\r
+                value = obj_api.get_latest('osd', service_id, i_key)\r
+            if key_name == 'stat_bytes':\r
+                stat_bytes = value\r
+            elif key_name == 'stat_bytes_used':\r
+                stat_bytes_used = value\r
+            else:\r
+                d_osd.fields[key_name] = value\r
+\r
+        if stat_bytes and stat_bytes_used:\r
+            d_osd.fields['stat_bytes_used_percentage'] = \\r
+                round(float(stat_bytes_used) / float(stat_bytes) * 100, 4)\r
+        else:\r
+            d_osd.fields['stat_bytes_used_percentage'] = 0.0000\r
+        self.data.append(d_osd)\r
+\r
+    def _generate_mon(self, cluster_id, service_name, perf_counts):\r
+        d_mon = CephMON()\r
+        d_mon.tags['cluster_id'] = cluster_id\r
+        d_mon.tags['mon_id'] = service_name[4:]\r
+        d_mon.fields['agenthost'] = socket.gethostname()\r
+        d_mon.tags['agenthost_domain_id'] = \\r
+            '%s_%s' % (cluster_id, d_mon.fields['agenthost'])\r
+        d_mon.fields['num_sessions'] = \\r
+            perf_counts.get('mon.num_sessions', {}).get('value', 0)\r
+        d_mon.fields['session_add'] = \\r
+            perf_counts.get('mon.session_add', {}).get('value', 0)\r
+        d_mon.fields['session_rm'] = \\r
+            perf_counts.get('mon.session_rm', {}).get('value', 0)\r
+        d_mon.fields['session_trim'] = \\r
+            perf_counts.get('mon.session_trim', {}).get('value', 0)\r
+        d_mon.fields['num_elections'] = \\r
+            perf_counts.get('mon.num_elections', {}).get('value', 0)\r
+        d_mon.fields['election_call'] = \\r
+            perf_counts.get('mon.election_call', {}).get('value', 0)\r
+        d_mon.fields['election_win'] = \\r
+            perf_counts.get('mon.election_win', {}).get('value', 0)\r
+        d_mon.fields['election_lose'] = \\r
+            perf_counts.get('mon.election_lose', {}).get('value', 0)\r
+        self.data.append(d_mon)\r
+\r
+    def _collect_data(self):\r
+        # process data and save to 'self.data'\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        perf_data = obj_api.module.get_all_perf_counters()\r
+        if not perf_data or not isinstance(perf_data, dict):\r
+            self._logger.error('unable to get all perf counters')\r
+            return\r
+        cluster_id = obj_api.get_cluster_id()\r
+        for n_name, i_perf in perf_data.items():\r
+            if n_name[0:3].lower() == 'mon':\r
+                self._generate_mon(cluster_id, n_name, i_perf)\r
+            elif n_name[0:3].lower() == 'osd':\r
+                self._generate_osd(cluster_id, n_name, i_perf)\r
+        self._generate_osd_erasure_profile(cluster_id)\r
+        self._generate_osd_tree(cluster_id)\r
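In Ceph's perf-counter schema the low two bits of a counter's type carry the value representation (time/u64), so _stattype_to_str masks them off with PERFCOUNTER_TYPE_MASK = ~3 before classifying. A worked example of that masking:

    PERFCOUNTER_COUNTER = 8
    PERFCOUNTER_TYPE_MASK = ~3              # ...11111100 in two's complement
    stattype = PERFCOUNTER_COUNTER | 2      # counter stored as a u64 value
    typeonly = stattype & PERFCOUNTER_TYPE_MASK
    assert typeonly == PERFCOUNTER_COUNTER  # reported as 'counter'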
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py
new file mode 100644 (file)
index 0000000..43e379b
--- /dev/null
@@ -0,0 +1,58 @@
+from __future__ import absolute_import\r
+\r
+import socket\r
+\r
+from . import MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class CephPool(MetricsField):\r
+    """ Ceph pool structure """\r
+    measurement = 'ceph_pool'\r
+\r
+    def __init__(self):\r
+        super(CephPool, self).__init__()\r
+        self.tags['cluster_id'] = None\r
+        self.tags['pool_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.fields['bytes_used'] = None\r
+        self.fields['max_avail'] = None\r
+        self.fields['objects'] = None\r
+        self.fields['wr_bytes'] = None\r
+        self.fields['dirty'] = None\r
+        self.fields['rd_bytes'] = None\r
+        self.fields['raw_bytes_used'] = None\r
+\r
+\r
+class CephPoolAgent(MetricsAgent):\r
+    measurement = 'ceph_pool'\r
+\r
+    def _collect_data(self):\r
+        # process data and save to 'self.data'\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        df_data = obj_api.module.get('df')\r
+        cluster_id = obj_api.get_cluster_id()\r
+        for pool in df_data.get('pools', []):\r
+            d_pool = CephPool()\r
+            p_id = pool.get('id')\r
+            d_pool.tags['cluster_id'] = cluster_id\r
+            d_pool.tags['pool_id'] = p_id\r
+            d_pool.fields['agenthost'] = socket.gethostname()\r
+            d_pool.tags['agenthost_domain_id'] = \\r
+                '%s_%s' % (cluster_id, d_pool.fields['agenthost'])\r
+            stats = pool.get('stats', {})\r
+            d_pool.fields['bytes_used'] = stats.get('bytes_used', 0)\r
+            d_pool.fields['max_avail'] = stats.get('max_avail', 0)\r
+            d_pool.fields['objects'] = stats.get('objects', 0)\r
+            d_pool.fields['wr_bytes'] = stats.get('wr_bytes', 0)\r
+            d_pool.fields['dirty'] = stats.get('dirty', 0)\r
+            d_pool.fields['rd_bytes'] = stats.get('rd_bytes', 0)\r
+            d_pool.fields['raw_bytes_used'] = stats.get('raw_bytes_used', 0)\r
+            self.data.append(d_pool)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py
new file mode 100644 (file)
index 0000000..ed4dad3
--- /dev/null
@@ -0,0 +1,713 @@
+from __future__ import absolute_import\r
+\r
+import re\r
+import socket\r
+\r
+from . import MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+from ...common.cypher import CypherOP, NodeInfo\r
+\r
+\r
+class BaseDP(object):\r
+    """ basic diskprediction structure """\r
+    _fields = []\r
+\r
+    def __init__(self, *args, **kwargs):\r
+        if len(args) > len(self._fields):\r
+            raise TypeError('Expected {} arguments'.format(len(self._fields)))\r
+\r
+        for name, value in zip(self._fields, args):\r
+            setattr(self, name, value)\r
+\r
+        for name in self._fields[len(args):]:\r
+            setattr(self, name, kwargs.pop(name))\r
+\r
+        if kwargs:\r
+            raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs)))\r
+\r
+\r
+class MGRDpCeph(BaseDP):\r
+    _fields = [\r
+        'fsid', 'health', 'max_osd', 'size',\r
+        'avail_size', 'raw_used', 'raw_used_percent'\r
+    ]\r
+\r
+\r
+class MGRDpHost(BaseDP):\r
+    _fields = ['fsid', 'host', 'ipaddr']\r
+\r
+\r
+class MGRDpMon(BaseDP):\r
+    _fields = ['fsid', 'host', 'ipaddr']\r
+\r
+\r
+class MGRDpOsd(BaseDP):\r
+    _fields = [\r
+        'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr',\r
+        'cluster_addr', 'state', 'ceph_release', 'osd_devices', 'rotational'\r
+    ]\r
+\r
+\r
+class MGRDpMds(BaseDP):\r
+    _fields = ['fsid', 'host', 'ipaddr']\r
+\r
+\r
+class MGRDpPool(BaseDP):\r
+    _fields = [\r
+        'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size',\r
+        'pg_num', 'pgp_num', 'created_time', 'pgids', 'osd_ids', 'tiers', 'cache_mode',\r
+        'erasure_code_profile', 'tier_of'\r
+    ]\r
+\r
+\r
+class MGRDpRBD(BaseDP):\r
+    _fields = ['fsid', '_id', 'name', 'pool_name', 'pool_id']\r
+\r
+\r
+class MGRDpFS(BaseDP):\r
+    _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes']\r
+\r
+\r
+class MGRDpPG(BaseDP):\r
+    _fields = [\r
+        'fsid', 'pgid', 'up_osds', 'acting_osds', 'state',\r
+        'objects', 'degraded', 'misplaced', 'unfound'\r
+    ]\r
+\r
+\r
+class MGRDpDisk(BaseDP):\r
+    _fields = ['host_domain_id', 'host', 'fs_journal_osd', 'bs_db_osd', 'bs_wal_osd', 'data_osd', 'osd_ids']\r
+\r
+\r
+class DBRelay(MetricsField):\r
+    """ DB Relay structure """\r
+    measurement = 'db_relay'\r
+\r
+    def __init__(self):\r
+        super(DBRelay, self).__init__()\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['dc_tag'] = 'na'\r
+        self.tags['host'] = None\r
+        self.fields['cmd'] = None\r
+\r
+\r
+class DBRelayAgent(MetricsAgent):\r
+    measurement = 'db_relay'\r
+\r
+    def __init__(self, *args, **kwargs):\r
+        super(DBRelayAgent, self).__init__(*args, **kwargs)\r
+        self._cluster_node = None\r
+        self._cluster_id = None\r
+        self._ceph = ClusterAPI(self._module_inst)\r
+        self._osd_maps = self._ceph.module.get('osd_map')\r
+        self._mon_maps = self._ceph.module.get('mon_map')\r
+        self._fs_maps = self._ceph.module.get('fs_map')\r
+        self._osd_metadata = self._ceph.module.get('osd_metadata')\r
+        self._host_nodes = dict()\r
+        self._osd_nodes = dict()\r
+        self._mon_nodes = dict()\r
+        self._mds_nodes = dict()\r
+        self._dev_nodes = dict()\r
+        self._pool_nodes = dict()\r
+        self._rbd_nodes = dict()\r
+        self._fs_nodes = dict()\r
+        # initial ceph all node states\r
+        self._init_cluster_node()\r
+        self._init_hosts()\r
+        self._init_mons()\r
+        self._init_mds()\r
+        self._init_osds()\r
+        self._init_devices()\r
+        self._init_pools()\r
+        self._init_rbds()\r
+        self._init_fs()\r
+\r
+    def _init_hosts(self):\r
+        hosts = set()\r
+        # Add host from osd\r
+        osd_data = self._osd_maps.get('osds', [])\r
+        for _data in osd_data:\r
+            osd_id = _data['osd']\r
+            if not _data.get('in'):\r
+                continue\r
+            osd_addr = _data['public_addr'].split(':')[0]\r
+            osd_metadata = self._ceph.get_osd_metadata(osd_id)\r
+            if osd_metadata:\r
+                osd_host = osd_metadata['hostname']\r
+                hosts.add((osd_host, osd_addr))\r
+\r
+        # Add host from mon\r
+        mons = self._mon_maps.get('mons', [])\r
+        for _data in mons:\r
+            mon_host = _data['name']\r
+            mon_addr = _data['public_addr'].split(':')[0]\r
+            if mon_host:\r
+                hosts.add((mon_host, mon_addr))\r
+\r
+        # Add host from mds\r
+        file_systems = self._fs_maps.get('filesystems', [])\r
+        for _data in file_systems:\r
+            mds_info = _data.get('mdsmap').get('info')\r
+            for _gid in mds_info:\r
+                mds_data = mds_info[_gid]\r
+                mds_addr = mds_data.get('addr').split(':')[0]\r
+                mds_host = mds_data.get('name')\r
+                if mds_host:\r
+                    hosts.add((mds_host, mds_addr))\r
+        for tp in hosts:\r
+            host = tp[0]\r
+            self._host_nodes[host] = None\r
+\r
+            host_node = NodeInfo(\r
+                label='VMHost',\r
+                domain_id='{}_{}'.format(self._cluster_id, host),\r
+                name=host,\r
+                meta={}\r
+            )\r
+            self._host_nodes[host] = host_node\r
+\r
+    def _init_mons(self):\r
+        cluster_id = self._cluster_id\r
+        mons = self._mon_maps.get('mons', [])\r
+        for mon in mons:\r
+            mon_name = mon.get('name', '')\r
+            mon_addr = mon.get('addr', '').split(':')[0]\r
+            if mon_name not in self._host_nodes.keys():\r
+                continue\r
+\r
+            dp_mon = MGRDpMon(\r
+                fsid=cluster_id,\r
+                host=mon_name,\r
+                ipaddr=mon_addr\r
+            )\r
+\r
+            # create mon node\r
+            mon_node = NodeInfo(\r
+                label='CephMon',\r
+                domain_id='{}.mon.{}'.format(cluster_id, mon_name),\r
+                name=mon_name,\r
+                meta=dp_mon.__dict__\r
+            )\r
+            self._mon_nodes[mon_name] = mon_node\r
+\r
+    def _init_mds(self):\r
+        cluster_id = self._cluster_id\r
+        file_systems = self._fs_maps.get('filesystems', [])\r
+        for _data in file_systems:\r
+            mds_info = _data.get('mdsmap').get('info')\r
+            for _gid in mds_info:\r
+                mds_data = mds_info[_gid]\r
+                mds_addr = mds_data.get('addr').split(':')[0]\r
+                mds_host = mds_data.get('name')\r
+                mds_gid = mds_data.get('gid')\r
+\r
+                if mds_host not in self._host_nodes:\r
+                    continue\r
+\r
+                dp_mds = MGRDpMds(\r
+                    fsid=cluster_id,\r
+                    host=mds_host,\r
+                    ipaddr=mds_addr\r
+                )\r
+\r
+                # create mds node\r
+                mds_node = NodeInfo(\r
+                    label='CephMds',\r
+                    domain_id='{}.mds.{}'.format(cluster_id, mds_gid),\r
+                    name='MDS.{}'.format(mds_gid),\r
+                    meta=dp_mds.__dict__\r
+                )\r
+                self._mds_nodes[mds_host] = mds_node\r
+\r
+    def _init_osds(self):\r
+        for osd in self._osd_maps.get('osds', []):\r
+            osd_id = osd.get('osd', -1)\r
+            meta = self._osd_metadata.get(str(osd_id), {})\r
+            osd_host = meta['hostname']\r
+            osd_ceph_version = meta['ceph_version']\r
+            osd_rotational = meta['rotational']\r
+            osd_devices = meta['devices'].split(',')\r
+\r
+            # filter 'dm' device.\r
+            devices = []\r
+            for devname in osd_devices:\r
+                if 'dm' in devname:\r
+                    continue\r
+                devices.append(devname)\r
+\r
+            if osd_host not in self._host_nodes.keys():\r
+                continue\r
+            self._osd_nodes[str(osd_id)] = None\r
+            public_addr = []\r
+            cluster_addr = []\r
+            for addr in osd.get('public_addrs', {}).get('addrvec', []):\r
+                public_addr.append(addr.get('addr'))\r
+            for addr in osd.get('cluster_addrs', {}).get('addrvec', []):\r
+                cluster_addr.append(addr.get('addr'))\r
+            dp_osd = MGRDpOsd(\r
+                fsid=self._cluster_id,\r
+                host=osd_host,\r
+                _id=osd_id,\r
+                uuid=osd.get('uuid'),\r
+                up=osd.get('up'),\r
+                _in=osd.get('in'),\r
+                weight=osd.get('weight'),\r
+                public_addr=','.join(public_addr),\r
+                cluster_addr=','.join(cluster_addr),\r
+                state=','.join(osd.get('state', [])),\r
+                ceph_release=osd_ceph_version,\r
+                osd_devices=','.join(devices),\r
+                rotational=osd_rotational)\r
+            for k, v in meta.items():\r
+                setattr(dp_osd, k, v)\r
+\r
+            # create osd node\r
+            osd_node = NodeInfo(\r
+                label='CephOsd',\r
+                domain_id='{}.osd.{}'.format(self._cluster_id, osd_id),\r
+                name='OSD.{}'.format(osd_id),\r
+                meta=dp_osd.__dict__\r
+            )\r
+            self._osd_nodes[str(osd_id)] = osd_node\r
+\r
+    def _init_devices(self):\r
+        r = re.compile(r'[^/dev]\D+')  # e.g. extracts 'sdb' from '/dev/sdb1'\r
+        for osdid, o_val in self._osd_nodes.items():\r
+            o_devs = o_val.meta.get('device_ids', '').split(',')\r
+            # fs_store\r
+            journal_devs = o_val.meta.get('backend_filestore_journal_dev_node', '').split(',')\r
+            # bs_store\r
+            bs_db_devs = o_val.meta.get('bluefs_db_dev_node', '').split(',')\r
+            bs_wal_devs = o_val.meta.get('bluefs_wal_dev_node', '').split(',')\r
+\r
+            for dev in o_devs:\r
+                fs_journal = []\r
+                bs_db = []\r
+                bs_wal = []\r
+                data = []\r
+                if len(dev.split('=')) != 2:\r
+                    continue\r
+                dev_name = dev.split('=')[0]\r
+                dev_id = dev.split('=')[1]\r
+                if not dev_id:\r
+                    continue\r
+\r
+                for j_dev in journal_devs:\r
+                    if dev_name == ''.join(r.findall(j_dev)):\r
+                        fs_journal.append(osdid)\r
+                for db_dev in bs_db_devs:\r
+                    if dev_name == ''.join(r.findall(db_dev)):\r
+                        bs_db.append(osdid)\r
+                for wal_dev in bs_wal_devs:\r
+                    if dev_name == ''.join(r.findall(wal_dev)):\r
+                        bs_wal.append(osdid)\r
+\r
+                if not fs_journal and not bs_db and not bs_wal:\r
+                    data.append(osdid)\r
+\r
+                disk_domain_id = dev_id\r
+                if disk_domain_id not in self._dev_nodes.keys():\r
+                    dp_disk = MGRDpDisk(\r
+                        host_domain_id='{}_{}'.format(self._cluster_id, o_val.meta.get('host')),\r
+                        host=o_val.meta.get('host'),\r
+                        osd_ids=osdid,\r
+                        fs_journal_osd=','.join(str(x) for x in fs_journal) if fs_journal else '',\r
+                        bs_db_osd=','.join(str(x) for x in bs_db) if bs_db else '',\r
+                        bs_wal_osd=','.join(str(x) for x in bs_wal) if bs_wal else '',\r
+                        data_osd=','.join(str(x) for x in data) if data else ''\r
+                    )\r
+                    # create disk node\r
+                    disk_node = NodeInfo(\r
+                        label='VMDisk',\r
+                        domain_id=disk_domain_id,\r
+                        name=dev_name,\r
+                        meta=dp_disk.__dict__\r
+                    )\r
+                    self._dev_nodes[disk_domain_id] = disk_node\r
+                else:\r
+                    dev_node = self._dev_nodes[disk_domain_id]\r
+                    osd_ids = dev_node.meta.get('osd_ids', '')\r
+                    if osdid not in osd_ids.split(','):\r
+                        arr_value = osd_ids.split(',')\r
+                        arr_value.append(str(osdid))\r
+                        dev_node.meta['osd_ids'] = ','.join(arr_value)\r
+                    for key, ids in (('fs_journal_osd', fs_journal),\r
+                                     ('bs_db_osd', bs_db),\r
+                                     ('bs_wal_osd', bs_wal),\r
+                                     ('data_osd', data)):\r
+                        if not ids:\r
+                            continue\r
+                        # merge new osd ids into the comma-separated list,\r
+                        # reading the existing value once so earlier appends\r
+                        # are not lost between iterations\r
+                        value = dev_node.meta.get(key, '')\r
+                        arr_value = value.split(',') if value else []\r
+                        for t in ids:\r
+                            if t not in arr_value:\r
+                                arr_value.append(t)\r
+                        if arr_value:\r
+                            dev_node.meta[key] = ','.join(str(x) for x in arr_value)\r
+\r
+    def _init_cluster_node(self):\r
+        cluster_id = self._ceph.get_cluster_id()\r
+        ceph_df_stat = self._ceph.get_ceph_df_state()\r
+        dp_cluster = MGRDpCeph(\r
+            fsid=cluster_id,\r
+            health=self._ceph.get_health_status(),\r
+            max_osd=len(self._ceph.get_osds()),\r
+            size=ceph_df_stat.get('total_size'),\r
+            avail_size=ceph_df_stat.get('avail_size'),\r
+            raw_used=ceph_df_stat.get('raw_used_size'),\r
+            raw_used_percent=ceph_df_stat.get('used_percent')\r
+        )\r
+        cluster_name = cluster_id[-12:]\r
+        cluster_node = NodeInfo(\r
+            label='CephCluster',\r
+            domain_id=cluster_id,\r
+            name='cluster-{}'.format(cluster_name),\r
+            meta=dp_cluster.__dict__\r
+        )\r
+        self._cluster_id = cluster_id\r
+        self._cluster_node = cluster_node\r
+\r
+    def _init_pools(self):\r
+        pools = self._osd_maps.get('pools', [])\r
+        cluster_id = self._cluster_id\r
+        for pool in pools:\r
+            osds = []\r
+            pgs = self._ceph.get_pgs_up_by_poolid(int(pool.get('pool', -1)))\r
+            for pg_id, osd_id in pgs.items():\r
+                for o_id in osd_id:\r
+                    if o_id not in osds:\r
+                        osds.append(str(o_id))\r
+            dp_pool = MGRDpPool(\r
+                fsid=cluster_id,\r
+                size=pool.get('size'),\r
+                pool_name=pool.get('pool_name'),\r
+                pool_id=pool.get('pool'),\r
+                type=pool.get('type'),\r
+                min_size=pool.get('min_size'),\r
+                pg_num=pool.get('pg_num'),\r
+                pgp_num=pool.get('pg_placement_num'),\r
+                created_time=pool.get('create_time'),\r
+                pgids=','.join(pgs.keys()),\r
+                osd_ids=','.join(osds),\r
+                tiers=','.join(str(x) for x in pool.get('tiers', [])),\r
+                cache_mode=pool.get('cache_mode', ''),\r
+                erasure_code_profile=str(pool.get('erasure_code_profile', '')),\r
+                tier_of=str(pool.get('tier_of', -1)))\r
+            # create pool node\r
+            pool_node = NodeInfo(\r
+                label='CephPool',\r
+                domain_id='{}_pool_{}'.format(cluster_id, pool.get('pool')),\r
+                name=pool.get('pool_name'),\r
+                meta=dp_pool.__dict__\r
+            )\r
+            self._pool_nodes[str(pool.get('pool'))] = pool_node\r
+\r
+    def _init_rbds(self):\r
+        cluster_id = self._cluster_id\r
+        for p_id, p_node in self._pool_nodes.items():\r
+            rbds = self._ceph.get_rbd_list(p_node.name)\r
+            self._rbd_nodes[str(p_id)] = []\r
+            for rbd in rbds:\r
+                dp_rbd = MGRDpRBD(\r
+                    fsid=cluster_id,\r
+                    _id=rbd['id'],\r
+                    name=rbd['name'],\r
+                    pool_name=rbd['pool_name'],\r
+                    pool_id=p_id,\r
+                )\r
+                # create rbd node\r
+                rbd_node = NodeInfo(\r
+                    label='CephRBD',\r
+                    domain_id='{}_rbd_{}'.format(cluster_id, rbd['id']),\r
+                    name=rbd['name'],\r
+                    meta=dp_rbd.__dict__,\r
+                )\r
+                self._rbd_nodes[str(p_id)].append(rbd_node)\r
+\r
+    def _init_fs(self):\r
+        # _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes']\r
+        cluster_id = self._cluster_id\r
+        file_systems = self._fs_maps.get('filesystems', [])\r
+        for fs in file_systems:\r
+            mdsmap = fs.get('mdsmap', {})\r
+            mds_hostnames = []\r
+            for m, md in mdsmap.get('info', {}).items():\r
+                if md.get('name') not in mds_hostnames:\r
+                    mds_hostnames.append(md.get('name'))\r
+            dp_fs = MGRDpFS(\r
+                fsid=cluster_id,\r
+                _id=fs.get('id'),\r
+                name=mdsmap.get('fs_name'),\r
+                metadata_pool=str(mdsmap.get('metadata_pool', -1)),\r
+                data_pools=','.join(str(i) for i in mdsmap.get('data_pools', [])),\r
+                mds_nodes=','.join(mds_hostnames),\r
+            )\r
+            fs_node = NodeInfo(\r
+                label='CephFS',\r
+                domain_id='{}_fs_{}'.format(cluster_id, fs.get('id')),\r
+                name=mdsmap.get('fs_name'),\r
+                meta=dp_fs.__dict__,\r
+            )\r
+            self._fs_nodes[str(fs.get('id'))] = fs_node\r
+\r
+    def _cluster_contains_host(self):\r
+        cluster_id = self._cluster_id\r
+        cluster_node = self._cluster_node\r
+\r
+        # create node relation\r
+        for h_id, h_node in self._host_nodes.items():\r
+            data = DBRelay()\r
+            # add osd node relationship\r
+            cypher_cmd = CypherOP.add_link(\r
+                cluster_node,\r
+                h_node,\r
+                'CephClusterContainsHost'\r
+            )\r
+            cluster_host = socket.gethostname()\r
+            data.fields['agenthost'] = cluster_host\r
+            data.tags['agenthost_domain_id'] = \\r
+                str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+            data.tags['host'] = cluster_host\r
+            data.fields['cmd'] = str(cypher_cmd)\r
+            self.data.append(data)\r
+\r
+    def _host_contains_mon(self):\r
+        for m_name, m_node in self._mon_nodes.items():\r
+            host_node = self._host_nodes.get(m_name)\r
+            if not host_node:\r
+                continue\r
+            data = DBRelay()\r
+            # add mon node relationship\r
+            cypher_cmd = CypherOP.add_link(\r
+                host_node,\r
+                m_node,\r
+                'HostContainsMon'\r
+            )\r
+            cluster_host = socket.gethostname()\r
+            data.fields['agenthost'] = cluster_host\r
+            data.tags['agenthost_domain_id'] = \\r
+                str('%s_%s' % (self._cluster_id, data.fields['agenthost']))\r
+            data.tags['host'] = cluster_host\r
+            data.fields['cmd'] = str(cypher_cmd)\r
+            self.data.append(data)\r
+\r
+    def _host_contains_osd(self):\r
+        cluster_id = self._cluster_id\r
+        for o_id, o_node in self._osd_nodes.items():\r
+            host_node = self._host_nodes.get(o_node.meta.get('host'))\r
+            if not host_node:\r
+                continue\r
+            data = DBRelay()\r
+            # add osd node relationship\r
+            cypher_cmd = CypherOP.add_link(\r
+                host_node,\r
+                o_node,\r
+                'HostContainsOsd'\r
+            )\r
+            cluster_host = socket.gethostname()\r
+            data.fields['agenthost'] = cluster_host\r
+            data.tags['agenthost_domain_id'] = \\r
+                str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+            data.tags['host'] = cluster_host\r
+            data.fields['cmd'] = str(cypher_cmd)\r
+            self.data.append(data)\r
+\r
+    def _host_contains_mds(self):\r
+        cluster_id = self._cluster_id\r
+        for m_name, mds_node in self._mds_nodes.items():\r
+            data = DBRelay()\r
+            host_node = self._host_nodes.get(mds_node.meta.get('host'))\r
+            if not host_node:\r
+                continue\r
+            # add osd node relationship\r
+            cypher_cmd = CypherOP.add_link(\r
+                host_node,\r
+                mds_node,\r
+                'HostContainsMds'\r
+            )\r
+            cluster_host = socket.gethostname()\r
+            data.fields['agenthost'] = cluster_host\r
+            data.tags['agenthost_domain_id'] = \\r
+                str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+            data.tags['host'] = cluster_host\r
+            data.fields['cmd'] = str(cypher_cmd)\r
+            self.data.append(data)\r
+\r
+    def _osd_contains_disk(self):\r
+        cluster_id = self._cluster_id\r
+        cluster_host = socket.gethostname()\r
+        for d_name, d_node in self._dev_nodes.items():\r
+            keys = {'data_osd': 'DataDiskOfOSD',\r
+                    'fs_journal_osd': 'FsJounalDiskOfOSD',\r
+                    'bs_db_osd': 'BsDBDiskOfOSD',\r
+                    'bs_wal_osd': 'BsWalDiskOfOSD'}\r
+            for k, v in keys.items():\r
+                if not d_node.meta.get(k):\r
+                    continue\r
+                for osdid in d_node.meta.get(k, '').split(','):\r
+                    data = DBRelay()\r
+                    osd_node = self._osd_nodes.get(str(osdid))\r
+                    if not osd_node:\r
+                        continue\r
+                    # add disk node relationship\r
+                    cypher_cmd = CypherOP.add_link(\r
+                        osd_node,\r
+                        d_node,\r
+                        v)\r
+                    data.fields['agenthost'] = cluster_host\r
+                    data.tags['agenthost_domain_id'] = \\r
+                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                    data.tags['host'] = cluster_host\r
+                    data.fields['cmd'] = str(cypher_cmd)\r
+                    self.data.append(data)\r
+\r
+            hostname = d_node.meta.get('host', '')\r
+            if not hostname:\r
+                continue\r
+            host_node = self._host_nodes.get(hostname)\r
+            if not host_node:\r
+                continue\r
+            # add osd node relationship\r
+            data = DBRelay()\r
+            cypher_cmd = CypherOP.add_link(\r
+                host_node,\r
+                d_node,\r
+                'VmHostContainsVmDisk'\r
+            )\r
+            data.fields['agenthost'] = cluster_host\r
+            data.tags['agenthost_domain_id'] = \\r
+                str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+            data.tags['host'] = cluster_host\r
+            data.fields['cmd'] = str(cypher_cmd)\r
+            self.data.append(data)\r
+\r
+    def _pool_contains_osd(self):\r
+        cluster_id = self._cluster_id\r
+        cluster_host = socket.gethostname()\r
+        for p_id, p_node in self._pool_nodes.items():\r
+            for o_id in p_node.meta.get('osd_ids', '').split(','):\r
+                osd_node = self._osd_nodes.get(str(o_id))\r
+                if not osd_node:\r
+                    continue\r
+                data = DBRelay()\r
+                cypher_cmd = CypherOP.add_link(\r
+                    osd_node,\r
+                    p_node,\r
+                    'OsdContainsPool'\r
+                )\r
+                data.fields['agenthost'] = cluster_host\r
+                data.tags['agenthost_domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                data.tags['host'] = cluster_host\r
+                data.fields['cmd'] = str(cypher_cmd)\r
+                self.data.append(data)\r
+\r
+    def _pool_contains_rbd(self):\r
+        cluster_id = self._cluster_id\r
+        cluster_host = socket.gethostname()\r
+        for p_id, p_node in self._pool_nodes.items():\r
+            for rbd_node in self._rbd_nodes.get(str(p_id), []):\r
+                if not rbd_node:\r
+                    continue\r
+                data = DBRelay()\r
+                cypher_cmd = CypherOP.add_link(\r
+                    p_node,\r
+                    rbd_node,\r
+                    'PoolContainsRBD'\r
+                )\r
+                data.fields['agenthost'] = cluster_host\r
+                data.tags['agenthost_domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                data.tags['host'] = cluster_host\r
+                data.fields['cmd'] = str(cypher_cmd)\r
+                self.data.append(data)\r
+\r
+    def _pool_contains_fs(self):\r
+        cluster_id = self._cluster_id\r
+        cluster_host = socket.gethostname()\r
+        for fs_id, fs_node in self._fs_nodes.items():\r
+            pool_attrs = ['metadata_pool', 'data_pools']\r
+            for p_attr in pool_attrs:\r
+                pools_id = fs_node.meta.get(p_attr).split(',')\r
+                for p_id in pools_id:\r
+                    p_node = self._pool_nodes.get(str(p_id))\r
+                    if p_node:\r
+                        data = DBRelay()\r
+                        cypher_cmd = CypherOP.add_link(\r
+                            p_node,\r
+                            fs_node,\r
+                            'MetadataPoolContainsFS' if p_attr == 'metadata_pool' else 'DataPoolContainsFS'\r
+                        )\r
+                        data.fields['agenthost'] = cluster_host\r
+                        data.tags['agenthost_domain_id'] = \\r
+                            str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                        data.tags['host'] = cluster_host\r
+                        data.fields['cmd'] = str(cypher_cmd)\r
+                        self.data.append(data)\r
+            for mds_name in fs_node.meta.get('mds_nodes', '').split(','):\r
+                mds_node = self._mds_nodes.get(mds_name)\r
+                if not mds_node:\r
+                    continue\r
+                data = DBRelay()\r
+                cypher_cmd = CypherOP.add_link(\r
+                    mds_node,\r
+                    fs_node,\r
+                    'MDSContainsFS'\r
+                )\r
+                data.fields['agenthost'] = cluster_host\r
+                data.tags['agenthost_domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                data.tags['host'] = cluster_host\r
+                data.fields['cmd'] = str(cypher_cmd)\r
+                self.data.append(data)\r
+\r
+    def _collect_data(self):\r
+        if not self._module_inst:\r
+            return\r
+        job_name = ['cluster_contains_host', 'host_contains_mon', 'host_contains_mds', 'host_contains_osd', 'osd_contains_disk',\r
+                    'pool_contains_osd', 'pool_contains_rbd', 'pool_contains_fs']\r
+        for job in job_name:\r
+            fn = getattr(self, '_%s' % job, None)\r
+            if not fn:\r
+                continue\r
+            try:\r
+                fn()\r
+            except Exception as e:\r
+                self._module_inst.log.error('dbrelay - execute function {} fail, due to {}'.format(job, str(e)))\r
+                continue\r
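_collect_data above drives the relationship builders through a name-based getattr dispatch so that a failure in one job cannot stop the rest. The same idiom, reduced to a self-contained sketch with illustrative names:

    class Jobs(object):
        def _greet(self):
            print('hello')

        def run_all(self, names):
            for name in names:
                fn = getattr(self, '_%s' % name, None)  # None if the job is missing
                if not fn:
                    continue
                try:
                    fn()
                except Exception as e:
                    print('job %s failed: %s' % (name, e))

    Jobs().run_all(['greet', 'missing_job'])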
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py
new file mode 100644 (file)
index 0000000..63c8e87
--- /dev/null
@@ -0,0 +1,71 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+from __future__ import absolute_import
+
+import socket
+import time
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIAgentFields(MetricsField):
+    """ SAI DiskSmart structure """
+    measurement = 'sai_agent'
+
+    def __init__(self):
+        super(SAIAgentFields, self).__init__()
+        self.tags['agenthost_domain_id'] = None
+        self.fields['agent_type'] = str('ceph')
+        self.fields['agent_version'] = str(AGENT_VERSION)
+        self.fields['agenthost'] = ''
+        self.fields['cluster_domain_id'] = ''
+        self.fields['heartbeat_interval'] = ''
+        self.fields['host_ip'] = ''
+        self.fields['host_name'] = ''
+        self.fields['is_error'] = False
+        self.fields['is_ceph_error'] = False
+        self.fields['needs_warning'] = False
+        self.fields['send'] = None
+
+
+class SAIAgent(MetricsAgent):
+    measurement = 'sai_agent'
+
+    def _collect_data(self):
+        mgr_id = []
+        c_data = SAIAgentFields()
+        obj_api = ClusterAPI(self._module_inst)
+        svc_data = obj_api.get_server(socket.gethostname())
+        cluster_state = obj_api.get_health_status()
+        if not svc_data:
+            raise Exception('unable to get %s service info' % socket.gethostname())
+        # Filter mgr id
+        for s in svc_data.get('services', []):
+            if s.get('type', '') == 'mgr':
+                mgr_id.append(s.get('id'))
+
+        for _id in mgr_id:
+            c_data = SAIAgentFields()  # fresh record per mgr daemon
+            mgr_meta = obj_api.get_mgr_metadata(_id)
+            cluster_id = obj_api.get_cluster_id()
+            c_data.fields['cluster_domain_id'] = str(cluster_id)
+            c_data.fields['agenthost'] = str(socket.gethostname())
+            c_data.tags['agenthost_domain_id'] = \
+                str('%s_%s' % (cluster_id, c_data.fields['agenthost']))
+            c_data.fields['heartbeat_interval'] = \
+                int(obj_api.get_configuration('diskprediction_upload_metrics_interval'))
+            c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1'))
+            c_data.fields['host_name'] = str(socket.gethostname())
+            if obj_api.module.status.get('status', '') in [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED]:
+                c_data.fields['is_error'] = True
+            else:
+                c_data.fields['is_error'] = False
+            if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']:
+                c_data.fields['is_ceph_error'] = True
+                c_data.fields['needs_warning'] = True
+                c_data.fields['is_error'] = True
+                c_data.fields['problems'] = str(obj_api.get_health_checks())
+            else:
+                c_data.fields['is_ceph_error'] = False
+            c_data.fields['send'] = int(time.time() * 1000)
+            self.data.append(c_data)
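Every measurement the cloud agents emit is keyed by a `<cluster_fsid>_<hostname>` composite (`agenthost_domain_id`, `host_domain_id`, ...), so one upload stream can interleave records from many clusters. A short sketch of that identity scheme, with a made-up fsid:

    import socket
    import time

    cluster_id = '9f2c0b3a-0000-4000-8000-feedfacecafe'   # made-up fsid
    agenthost = socket.gethostname()
    record = {
        'agenthost': agenthost,
        # composite key: cluster fsid + reporting host
        'agenthost_domain_id': '%s_%s' % (cluster_id, agenthost),
        # millisecond timestamp, matching the 'send' field above
        'send': int(time.time() * 1000),
    }
    print(record)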
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py
new file mode 100644 (file)
index 0000000..f1f23be
--- /dev/null
@@ -0,0 +1,36 @@
+from __future__ import absolute_import
+
+import socket
+
+from . import AGENT_VERSION, MetricsAgent, MetricsField
+from ...common.clusterdata import ClusterAPI
+
+
+class SAIClusterFields(MetricsField):
+    """ SAI Host structure """
+    measurement = 'sai_cluster'
+
+    def __init__(self):
+        super(SAIClusterFields, self).__init__()
+        self.tags['domain_id'] = None
+        self.fields['agenthost'] = None
+        self.tags['agenthost_domain_id'] = None
+        self.fields['name'] = None
+        self.fields['agent_version'] = str(AGENT_VERSION)
+
+
+class SAIClusterAgent(MetricsAgent):
+    measurement = 'sai_cluster'
+
+    def _collect_data(self):
+        c_data = SAIClusterFields()
+        obj_api = ClusterAPI(self._module_inst)
+        cluster_id = obj_api.get_cluster_id()
+
+        c_data.tags['domain_id'] = str(cluster_id)
+        c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname()))
+        c_data.fields['agenthost'] = str(socket.gethostname())
+        c_data.tags['agenthost_domain_id'] = \
+            str('%s_%s' % (cluster_id, c_data.fields['agenthost']))
+        c_data.fields['name'] = 'Ceph mgr plugin'
+        self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py
new file mode 100644 (file)
index 0000000..1f3e1e5
--- /dev/null
@@ -0,0 +1,176 @@
+from __future__ import absolute_import\r
+\r
+import socket\r
+\r
+from . import AGENT_VERSION, MetricsAgent, MetricsField\r
+from ...common import get_human_readable\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class SAIDiskFields(MetricsField):\r
+    """ SAI Disk structure """\r
+    measurement = 'sai_disk'\r
+\r
+    def __init__(self):\r
+        super(SAIDiskFields, self).__init__()\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['disk_domain_id'] = None\r
+        self.tags['disk_name'] = None\r
+        self.tags['disk_wwn'] = None\r
+        self.tags['primary_key'] = None\r
+        self.fields['cluster_domain_id'] = None\r
+        self.fields['host_domain_id'] = None\r
+        self.fields['model'] = None\r
+        self.fields['serial_number'] = None\r
+        self.fields['size'] = None\r
+        self.fields['vendor'] = None\r
+        self.fields['agent_version'] = str(AGENT_VERSION)\r
+\r
+        """disk_status\r
+        0: unknown  1: good     2: failure\r
+        """\r
+        self.fields['disk_status'] = 0\r
+\r
+        """disk_type\r
+        0: unknown  1: HDD      2: SSD      3: SSD NVME\r
+        4: SSD SAS  5: SSD SATA 6: HDD SAS  7: HDD SATA\r
+        """\r
+        self.fields['disk_type'] = 0\r
+\r
+\r
+class SAIDiskAgent(MetricsAgent):\r
+    measurement = 'sai_disk'\r
+\r
+    @staticmethod\r
+    def _convert_disk_type(is_ssd, sata_version, protocol):\r
+        """ return type:\r
+            0: "Unknown', 1: 'HDD',\r
+            2: 'SSD",     3: "SSD NVME",\r
+            4: "SSD SAS", 5: "SSD SATA",\r
+            6: "HDD SAS", 7: "HDD SATA"\r
+        """\r
+        if is_ssd:\r
+            if sata_version and not protocol:\r
+                disk_type = 5\r
+            elif 'scsi' in protocol.lower():\r
+                disk_type = 4\r
+            elif 'nvme' in protocol.lower():\r
+                disk_type = 3\r
+            else:\r
+                disk_type = 2\r
+        else:\r
+            if sata_version and not protocol:\r
+                disk_type = 7\r
+            elif 'scsi' in protocol.lower():\r
+                disk_type = 6\r
+            else:\r
+                disk_type = 1\r
+        return disk_type\r
+\r
+    def _collect_data(self):\r
+        # process data and save to 'self.data'\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        cluster_id = obj_api.get_cluster_id()\r
+        osds = obj_api.get_osds()\r
+        for osd in osds:\r
+            if osd.get('osd') is None:\r
+                continue\r
+            if not osd.get('in'):\r
+                continue\r
+            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
+            if not osds_meta:\r
+                continue\r
+            osds_smart = obj_api.get_osd_smart(osd.get('osd'))\r
+            if not osds_smart:\r
+                continue\r
+            for dev_name, s_val in osds_smart.items():\r
+                d_data = SAIDiskFields()\r
+                d_data.tags['disk_name'] = str(dev_name)\r
+                d_data.fields['cluster_domain_id'] = str(cluster_id)\r
+                d_data.tags['host_domain_id'] = \\r
+                    str('%s_%s'\r
+                        % (cluster_id, osds_meta.get('hostname', 'None')))\r
+                d_data.fields['agenthost'] = str(socket.gethostname())\r
+                d_data.tags['agenthost_domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, d_data.fields['agenthost']))\r
+                serial_number = s_val.get('serial_number')\r
+                wwn = s_val.get('wwn', {})\r
+                wwpn = ''\r
+                if wwn:\r
+                    wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
+                    for k in wwn.keys():\r
+                        if k in ['naa', 't10', 'eui', 'iqn']:\r
+                            wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
+                            break\r
+\r
+                if wwpn:\r
+                    d_data.tags['disk_domain_id'] = str(dev_name)\r
+                    d_data.tags['disk_wwn'] = str(wwpn)\r
+                    if serial_number:\r
+                        d_data.fields['serial_number'] = str(serial_number)\r
+                    else:\r
+                        d_data.fields['serial_number'] = str(wwpn)\r
+                elif serial_number:\r
+                    d_data.tags['disk_domain_id'] = str(dev_name)\r
+                    d_data.fields['serial_number'] = str(serial_number)\r
+                    # the `if wwpn` branch above already handled a real\r
+                    # WWN, so wwpn is always empty here; reuse the serial\r
+                    # number as the WWN tag\r
+                    d_data.tags['disk_wwn'] = str(serial_number)\r
+                else:\r
+                    d_data.tags['disk_domain_id'] = str(dev_name)\r
+                    d_data.tags['disk_wwn'] = str(dev_name)\r
+                    d_data.fields['serial_number'] = str(dev_name)\r
+                d_data.tags['primary_key'] = \\r
+                    str('%s%s%s'\r
+                        % (cluster_id, d_data.tags['host_domain_id'],\r
+                           d_data.tags['disk_domain_id']))\r
+                d_data.fields['disk_status'] = int(1)\r
+                is_ssd = s_val.get('rotation_rate') == 0\r
+                vendor = s_val.get('vendor', None)\r
+                model = s_val.get('model_name', None)\r
+                if s_val.get('sata_version', {}).get('string'):\r
+                    sata_version = s_val['sata_version']['string']\r
+                else:\r
+                    sata_version = ''\r
+                if s_val.get('device', {}).get('protocol'):\r
+                    protocol = s_val['device']['protocol']\r
+                else:\r
+                    protocol = ''\r
+                d_data.fields['disk_type'] = \\r
+                    self._convert_disk_type(is_ssd, sata_version, protocol)\r
+                d_data.fields['firmware_version'] = \\r
+                    str(s_val.get('firmware_version'))\r
+                if model:\r
+                    d_data.fields['model'] = str(model)\r
+                if vendor:\r
+                    d_data.fields['vendor'] = str(vendor)\r
+                if sata_version:\r
+                    d_data.fields['sata_version'] = str(sata_version)\r
+                if s_val.get('logical_block_size'):\r
+                    d_data.fields['sector_size'] = \\r
+                        str(s_val['logical_block_size'])\r
+                d_data.fields['transport_protocol'] = ''\r
+                if s_val.get('model_family'):  # model_family supersedes vendor\r
+                    d_data.fields['vendor'] = str(s_val['model_family']).replace('"', "'")\r
+                try:\r
+                    if isinstance(s_val.get('user_capacity'), dict):\r
+                        if isinstance(s_val['user_capacity'].get('bytes'), dict):\r
+                            user_capacity = \\r
+                                s_val['user_capacity'].get('bytes', {}).get('n', 0)\r
+                        else:\r
+                            user_capacity = s_val['user_capacity'].get('bytes')\r
+                    else:\r
+                        user_capacity = s_val.get('user_capacity', 0)\r
+                except ValueError:\r
+                    user_capacity = 0\r
+                if str(user_capacity).isdigit():\r
+                    d_data.fields['size'] = get_human_readable(int(user_capacity), 0)\r
+                else:\r
+                    d_data.fields['size'] = str(user_capacity)\r
+                if s_val.get('smart_status', {}).get('passed'):\r
+                    d_data.fields['smart_health_status'] = 'PASSED'\r
+                else:\r
+                    d_data.fields['smart_health_status'] = 'FAILED'\r
+                self.data.append(d_data)\r
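`_convert_disk_type` folds three smartctl hints (rotation rate, SATA version string, transport protocol) into a single integer code. The same decision table restated as a standalone function with a few spot checks; the inputs are assumed smartctl-style values:

    def convert_disk_type(is_ssd, sata_version, protocol):
        # same decision table as SAIDiskAgent._convert_disk_type
        if is_ssd:
            if sata_version and not protocol:
                return 5        # SSD SATA
            if 'scsi' in protocol.lower():
                return 4        # SSD SAS
            if 'nvme' in protocol.lower():
                return 3        # SSD NVME
            return 2            # SSD, transport unknown
        if sata_version and not protocol:
            return 7            # HDD SATA
        if 'scsi' in protocol.lower():
            return 6            # HDD SAS
        return 1                # HDD, transport unknown

    assert convert_disk_type(True, '', 'NVMe') == 3
    assert convert_disk_type(True, 'SATA 3.2', '') == 5
    assert convert_disk_type(False, '', 'SCSI') == 6
    assert convert_disk_type(False, '', '') == 1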
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py
new file mode 100644 (file)
index 0000000..0f03a82
--- /dev/null
@@ -0,0 +1,156 @@
+from __future__ import absolute_import\r
+\r
+import datetime\r
+import json\r
+import _strptime  # imported up front: datetime.strptime lazily imports it, which races under threads\r
+import socket\r
+import time\r
+\r
+from . import AGENT_VERSION, MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class SAIDiskSmartFields(MetricsField):\r
+    """ SAI DiskSmart structure """\r
+    measurement = 'sai_disk_smart'\r
+\r
+    def __init__(self):\r
+        super(SAIDiskSmartFields, self).__init__()\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.tags['disk_domain_id'] = None\r
+        self.tags['disk_name'] = None\r
+        self.tags['disk_wwn'] = None\r
+        self.tags['primary_key'] = None\r
+        self.fields['cluster_domain_id'] = None\r
+        self.fields['host_domain_id'] = None\r
+        self.fields['agent_version'] = str(AGENT_VERSION)\r
+\r
+\r
+class SAIDiskSmartAgent(MetricsAgent):\r
+    measurement = 'sai_disk_smart'\r
+\r
+    def _collect_data(self):\r
+        # process data and save to 'self.data'\r
+        obj_api = ClusterAPI(self._module_inst)\r
+        cluster_id = obj_api.get_cluster_id()\r
+        osds = obj_api.get_osds()\r
+        for osd in osds:\r
+            if osd.get('osd') is None:\r
+                continue\r
+            if not osd.get('in'):\r
+                continue\r
+            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
+            if not osds_meta:\r
+                continue\r
+            devs_info = obj_api.get_osd_device_id(osd.get('osd'))\r
+            if devs_info:\r
+                for dev_name, dev_info in devs_info.items():\r
+                    osds_smart = obj_api.get_device_health(dev_info['dev_id'])\r
+                    if not osds_smart:\r
+                        continue\r
+                    # only report the most recent SMART record per device\r
+                    o_key = sorted(osds_smart.keys(), reverse=True)[0]\r
+                    if o_key:\r
+                        s_date = o_key\r
+                        s_val = osds_smart[s_date]\r
+                        smart_data = SAIDiskSmartFields()\r
+                        smart_data.tags['disk_name'] = str(dev_name)\r
+                        smart_data.fields['cluster_domain_id'] = str(cluster_id)\r
+                        smart_data.tags['host_domain_id'] = \\r
+                            str('%s_%s'\r
+                                % (cluster_id, osds_meta.get('hostname', 'None')))\r
+                        smart_data.fields['agenthost'] = str(socket.gethostname())\r
+                        smart_data.tags['agenthost_domain_id'] = \\r
+                            str('%s_%s' % (cluster_id, smart_data.fields['agenthost']))\r
+                        # parse attributes\r
+                        ata_smart = s_val.get('ata_smart_attributes', {})\r
+                        for attr in ata_smart.get('table', []):\r
+                            if attr.get('raw', {}).get('string'):\r
+                                if str(attr.get('raw', {}).get('string', '0')).isdigit():\r
+                                    smart_data.fields['%s_raw' % attr.get('id')] = \\r
+                                        int(attr.get('raw', {}).get('string', '0'))\r
+                                else:\r
+                                    if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():\r
+                                        smart_data.fields['%s_raw' % attr.get('id')] = \\r
+                                            int(attr.get('raw', {}).get('string', '0').split(' ')[0])\r
+                                    else:\r
+                                        smart_data.fields['%s_raw' % attr.get('id')] = \\r
+                                            attr.get('raw', {}).get('value', 0)\r
+                        smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'"))\r
+                        if s_val.get('temperature', {}).get('current') is not None:\r
+                            smart_data.fields['CurrentDriveTemperature_raw'] = \\r
+                                int(s_val['temperature']['current'])\r
+                        if s_val.get('temperature', {}).get('drive_trip') is not None:\r
+                            smart_data.fields['DriveTripTemperature_raw'] = \\r
+                                int(s_val['temperature']['drive_trip'])\r
+                        if s_val.get('elements_grown_list') is not None:\r
+                            smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list'])\r
+                        if s_val.get('power_on_time', {}).get('hours') is not None:\r
+                            smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours'])\r
+                        if s_val.get('scsi_percentage_used_endurance_indicator') is not None:\r
+                            smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \\r
+                                int(s_val['scsi_percentage_used_endurance_indicator'])\r
+                        if s_val.get('scsi_error_counter_log') is not None:\r
+                            s_err_counter = s_val['scsi_error_counter_log']\r
+                            for s_key in s_err_counter.keys():\r
+                                if s_key.lower() in ['read', 'write']:\r
+                                    for s1_key in s_err_counter[s_key].keys():\r
+                                        if s1_key.lower() == 'errors_corrected_by_eccfast':\r
+                                            smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['errors_corrected_by_eccfast'])\r
+                                        elif s1_key.lower() == 'errors_corrected_by_eccdelayed':\r
+                                            smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['errors_corrected_by_eccdelayed'])\r
+                                        elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites':\r
+                                            smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites'])\r
+                                        elif s1_key.lower() == 'total_errors_corrected':\r
+                                            smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['total_errors_corrected'])\r
+                                        elif s1_key.lower() == 'correction_algorithm_invocations':\r
+                                            smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['correction_algorithm_invocations'])\r
+                                        elif s1_key.lower() == 'gigabytes_processed':\r
+                                            smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \\r
+                                                float(s_err_counter[s_key]['gigabytes_processed'])\r
+                                        elif s1_key.lower() == 'total_uncorrected_errors':\r
+                                            smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \\r
+                                                int(s_err_counter[s_key]['total_uncorrected_errors'])\r
+\r
+                        serial_number = s_val.get('serial_number')\r
+                        wwn = s_val.get('wwn', {})\r
+                        wwpn = ''\r
+                        if wwn:\r
+                            wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
+                            for k in wwn.keys():\r
+                                if k in ['naa', 't10', 'eui', 'iqn']:\r
+                                    wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
+                                    break\r
+                        if wwpn:\r
+                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
+                            smart_data.tags['disk_wwn'] = str(wwpn)\r
+                            if serial_number:\r
+                                smart_data.fields['serial_number'] = str(serial_number)\r
+                            else:\r
+                                smart_data.fields['serial_number'] = str(wwpn)\r
+                        elif serial_number:\r
+                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
+                            smart_data.fields['serial_number'] = str(serial_number)\r
+                            # the `if wwpn` branch above already handled a\r
+                            # real WWN, so wwpn is always empty here; reuse\r
+                            # the serial number as the WWN tag\r
+                            smart_data.tags['disk_wwn'] = str(serial_number)\r
+                        else:\r
+                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
+                            smart_data.tags['disk_wwn'] = str(dev_name)\r
+                            smart_data.fields['serial_number'] = str(dev_name)\r
+                        smart_data.tags['primary_key'] = \\r
+                            str('%s%s%s'\r
+                                % (cluster_id,\r
+                                   smart_data.tags['host_domain_id'],\r
+                                   smart_data.tags['disk_domain_id']))\r
+                        smart_data.timestamp = \\r
+                            time.mktime(datetime.datetime.strptime(\r
+                                s_date, '%Y%m%d-%H%M%S').timetuple())\r
+                        self.data.append(smart_data)\r
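The attribute loop above normalizes smartctl raw values in three steps: a purely numeric string is used directly, a string like '34 (Min/Max 21/45)' contributes its leading integer, and anything else falls back to the numeric `raw.value`. The rule as a standalone helper; the sample dicts are hypothetical smartctl JSON fragments:

    def normalize_raw(attr):
        raw = attr.get('raw', {})
        s = str(raw.get('string', '0'))
        if s.isdigit():
            return int(s)                 # e.g. '12345'
        head = s.split(' ')[0]
        if head.isdigit():
            return int(head)              # e.g. '34 (Min/Max 21/45)'
        return raw.get('value', 0)        # last resort: numeric raw value

    assert normalize_raw({'raw': {'string': '12345', 'value': 1}}) == 12345
    assert normalize_raw({'raw': {'string': '34 (Min/Max 21/45)', 'value': 1}}) == 34
    assert normalize_raw({'raw': {'string': 'n/a', 'value': 7}}) == 7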
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py
new file mode 100644 (file)
index 0000000..ec1e4cb
--- /dev/null
@@ -0,0 +1,108 @@
+from __future__ import absolute_import\r
+\r
+import socket\r
+\r
+from . import AGENT_VERSION, MetricsAgent, MetricsField\r
+from ...common.clusterdata import ClusterAPI\r
+\r
+\r
+class SAIHostFields(MetricsField):\r
+    """ SAI Host structure """\r
+    measurement = 'sai_host'\r
+\r
+    def __init__(self):\r
+        super(SAIHostFields, self).__init__()\r
+        self.tags['domain_id'] = None\r
+        self.fields['agenthost'] = None\r
+        self.tags['agenthost_domain_id'] = None\r
+        self.fields['cluster_domain_id'] = None\r
+        self.fields['name'] = None\r
+        self.fields['host_ip'] = None\r
+        self.fields['host_ipv6'] = None\r
+        self.fields['host_uuid'] = None\r
+        self.fields['os_type'] = str('ceph')\r
+        self.fields['os_name'] = None\r
+        self.fields['os_version'] = None\r
+        self.fields['agent_version'] = str(AGENT_VERSION)\r
+\r
+\r
+class SAIHostAgent(MetricsAgent):\r
+    measurement = 'sai_host'\r
+\r
+    def _collect_data(self):\r
+        db = ClusterAPI(self._module_inst)\r
+        cluster_id = db.get_cluster_id()\r
+\r
+        hosts = set()\r
+\r
+        # Parse osd's host\r
+        osd_data = db.get_osds()\r
+        for _data in osd_data:\r
+            osd_id = _data['osd']\r
+            if not _data.get('in'):\r
+                continue\r
+            osd_addr = _data['public_addr'].split(':')[0]\r
+            osd_metadata = db.get_osd_metadata(osd_id)\r
+            if osd_metadata:\r
+                osd_host = osd_metadata.get('hostname', 'None')\r
+                if osd_host not in hosts:\r
+                    data = SAIHostFields()\r
+                    data.fields['agenthost'] = str(socket.gethostname())\r
+                    data.tags['agenthost_domain_id'] = \\r
+                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                    data.tags['domain_id'] = \\r
+                        str('%s_%s' % (cluster_id, osd_host))\r
+                    data.fields['cluster_domain_id'] = str(cluster_id)\r
+                    data.fields['host_ip'] = osd_addr\r
+                    data.fields['host_uuid'] = \\r
+                        str('%s_%s' % (cluster_id, osd_host))\r
+                    data.fields['os_name'] = \\r
+                        osd_metadata.get('ceph_release', '')\r
+                    data.fields['os_version'] = \\r
+                        osd_metadata.get('ceph_version_short', '')\r
+                    data.fields['name'] = 'osd_{}'.format(osd_host)\r
+                    hosts.add(osd_host)\r
+                    self.data.append(data)\r
+\r
+        # Parse mon node host\r
+        mons = db.get_mons()\r
+        for _data in mons:\r
+            mon_host = _data['name']\r
+            mon_addr = _data['public_addr'].split(':')[0]\r
+            if mon_host not in hosts:\r
+                data = SAIHostFields()\r
+                data.fields['agenthost'] = str(socket.gethostname())\r
+                data.tags['agenthost_domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                data.tags['domain_id'] = \\r
+                    str('%s_%s' % (cluster_id, mon_host))\r
+                data.fields['cluster_domain_id'] = str(cluster_id)\r
+                data.fields['host_ip'] = mon_addr\r
+                data.fields['host_uuid'] = \\r
+                    str('%s_%s' % (cluster_id, mon_host))\r
+                data.fields['name'] = 'mon_{}'.format(mon_host)\r
+                hosts.add(mon_host)\r
+                self.data.append(data)\r
+\r
+        # Parse fs host\r
+        file_systems = db.get_file_systems()\r
+        for _data in file_systems:\r
+            mds_info = _data.get('mdsmap').get('info')\r
+            for _gid in mds_info:\r
+                mds_data = mds_info[_gid]\r
+                mds_addr = mds_data.get('addr').split(':')[0]\r
+                mds_host = mds_data.get('name')\r
+                if mds_host not in hosts:\r
+                    data = SAIHostFields()\r
+                    data.fields['agenthost'] = str(socket.gethostname())\r
+                    data.tags['agenthost_domain_id'] = \\r
+                        str('%s_%s' % (cluster_id, data.fields['agenthost']))\r
+                    data.tags['domain_id'] = \\r
+                        str('%s_%s' % (cluster_id, mds_host))\r
+                    data.fields['cluster_domain_id'] = str(cluster_id)\r
+                    data.fields['host_ip'] = mds_addr\r
+                    data.fields['host_uuid'] = \\r
+                        str('%s_%s' % (cluster_id, mds_host))\r
+                    data.fields['name'] = 'mds_{}'.format(mds_host)\r
+                    hosts.add(mds_host)\r
+                    self.data.append(data)\r
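Each of the three loops above reports a host at most once by tracking bare hostnames in the `hosts` set; a `(host, addr)` tuple would never match the `not in hosts` test, which is why the `add()` calls store the plain name. The pattern in isolation, with made-up role/host pairs:

    hosts = set()
    daemons = [('osd', 'node1'), ('mon', 'node1'), ('mds', 'node2')]  # made up
    for role, host in daemons:
        if host in hosts:
            continue                   # host already reported under another role
        hosts.add(host)                # track by bare hostname, not (host, addr)
        print('%s_%s' % (role, host))  # -> osd_node1, mds_node2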
diff --git a/src/pybind/mgr/diskprediction_cloud/common/__init__.py b/src/pybind/mgr/diskprediction_cloud/common/__init__.py
new file mode 100644 (file)
index 0000000..cbc3c30
--- /dev/null
@@ -0,0 +1,59 @@
+from __future__ import absolute_import\r
+import errno\r
+from functools import wraps\r
+from six.moves.http_client import BAD_REQUEST\r
+import os\r
+import signal\r
+\r
+\r
+DP_MGR_STAT_OK = 'OK'\r
+DP_MGR_STAT_WARNING = 'WARNING'\r
+DP_MGR_STAT_FAILED = 'FAILED'\r
+DP_MGR_STAT_DISABLED = 'DISABLED'\r
+DP_MGR_STAT_ENABLED = 'ENABLED'\r
+\r
+\r
+class DummyResponse(object):\r
+    def __init__(self):\r
+        self.resp_json = dict()\r
+        self.content = 'DummyResponse'\r
+        self.status_code = BAD_REQUEST\r
+\r
+    def json(self):\r
+        return self.resp_json\r
+\r
+\r
+class TimeoutError(Exception):\r
+    pass\r
+\r
+\r
+def timeout(seconds=10, error_message=os.strerror(errno.ETIME)):\r
+    def decorator(func):\r
+        def _handle_timeout(signum, frame):\r
+            raise TimeoutError(error_message)\r
+\r
+        def wrapper(*args, **kwargs):\r
+            # prefer a per-instance _timeout when decorating a method\r
+            wait = getattr(args[0], '_timeout', seconds) if args else seconds\r
+            signal.signal(signal.SIGALRM, _handle_timeout)\r
+            signal.alarm(wait)\r
+            try:\r
+                result = func(*args, **kwargs)\r
+            finally:\r
+                signal.alarm(0)\r
+            return result\r
+\r
+        return wraps(func)(wrapper)\r
+\r
+    return decorator\r
+\r
+\r
+def get_human_readable(size, precision=2):\r
+    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']\r
+    suffix_index = 0\r
+    while size > 1000 and suffix_index < 4:\r
+        # increment the index of the suffix\r
+        suffix_index += 1\r
+        # apply the division\r
+        size = size/1000.0\r
+    return '%.*f %s' % (precision, size, suffixes[suffix_index])\r
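A usage sketch for the two helpers above, assuming the definitions in this file are in scope; the decorator is SIGALRM-based and therefore POSIX-only, and the Client class with its `_timeout` attribute is invented for illustration:

    class Client(object):
        _timeout = 2                    # picked up by the decorator's wrapper

        @timeout()                      # falls back to seconds=10 when no _timeout
        def slow_call(self):
            import time
            time.sleep(5)               # raises TimeoutError after ~2 seconds

    print(get_human_readable(1234567890))   # -> '1.23 GB'
    print(get_human_readable(512))          # -> '512.00 B'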
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py
new file mode 100644 (file)
index 0000000..9f65c73
--- /dev/null
@@ -0,0 +1,1775 @@
+# Generated by the protocol buffer compiler.  DO NOT EDIT!
+# source: mainServer.proto
+
+import sys
+_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
+from google.protobuf import descriptor as _descriptor
+from google.protobuf import message as _message
+from google.protobuf import reflection as _reflection
+from google.protobuf import symbol_database as _symbol_database
+from google.protobuf import descriptor_pb2
+# @@protoc_insertion_point(imports)
+
+_sym_db = _symbol_database.Default()
+
+
+from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
+
+
+DESCRIPTOR = _descriptor.FileDescriptor(
+  name='mainServer.proto',
+  package='proto',
+  syntax='proto3',
+  serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 \x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 
\x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 .proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a .proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3')
+  ,
+  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
+
+
+
+_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
+  name='PhoneType',
+  full_name='proto.Person.PhoneType',
+  filename=None,
+  file=DESCRIPTOR,
+  values=[
+    _descriptor.EnumValueDescriptor(
+      name='MOBILE', index=0, number=0,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='HOME', index=1, number=1,
+      options=None,
+      type=None),
+    _descriptor.EnumValueDescriptor(
+      name='WORK', index=2, number=2,
+      options=None,
+      type=None),
+  ],
+  containing_type=None,
+  options=None,
+  serialized_start=579,
+  serialized_end=622,
+)
+_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE)
+
+
+_EMPTY = _descriptor.Descriptor(
+  name='Empty',
+  full_name='proto.Empty',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=57,
+  serialized_end=64,
+)
+
+
+_GENERALMSGOUTPUT = _descriptor.Descriptor(
+  name='GeneralMsgOutput',
+  full_name='proto.GeneralMsgOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.GeneralMsgOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=66,
+  serialized_end=101,
+)
+
+
+_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor(
+  name='GeneralHeartbeatOutput',
+  full_name='proto.GeneralHeartbeatOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=103,
+  serialized_end=144,
+)
+
+
+_PINGOUTOUT = _descriptor.Descriptor(
+  name='PingOutout',
+  full_name='proto.PingOutout',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.PingOutout.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=146,
+  serialized_end=175,
+)
+
+
+_TESTINPUT = _descriptor.Descriptor(
+  name='TestInput',
+  full_name='proto.TestInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='people', full_name='proto.TestInput.people', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=177,
+  serialized_end=219,
+)
+
+
+_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor(
+  name='MapValueEntry',
+  full_name='proto.TestOutput.MapValueEntry',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=365,
+  serialized_end=412,
+)
+
+_TESTOUTPUT = _descriptor.Descriptor(
+  name='TestOutput',
+  full_name='proto.TestOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='strArray', full_name='proto.TestOutput.strArray', index=0,
+      number=1, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='mapValue', full_name='proto.TestOutput.mapValue', index=1,
+      number=2, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='pn', full_name='proto.TestOutput.pn', index=2,
+      number=4, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='profile', full_name='proto.TestOutput.profile', index=3,
+      number=3, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=222,
+  serialized_end=412,
+)
+
+
+_PERSON_PHONENUMBER = _descriptor.Descriptor(
+  name='PhoneNumber',
+  full_name='proto.Person.PhoneNumber',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='number', full_name='proto.Person.PhoneNumber.number', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='type', full_name='proto.Person.PhoneNumber.type', index=1,
+      number=2, type=14, cpp_type=8, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=509,
+  serialized_end=577,
+)
+
+_PERSON = _descriptor.Descriptor(
+  name='Person',
+  full_name='proto.Person',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='proto.Person.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='id', full_name='proto.Person.id', index=1,
+      number=2, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.Person.email', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='phones', full_name='proto.Person.phones', index=3,
+      number=4, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_PERSON_PHONENUMBER, ],
+  enum_types=[
+    _PERSON_PHONETYPE,
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=415,
+  serialized_end=622,
+)
+
+
+_PROFILE_FILE = _descriptor.Descriptor(
+  name='File',
+  full_name='proto.Profile.File',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='name', full_name='proto.Profile.File.name', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1,
+      number=2, type=5, cpp_type=1, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3,
+      number=4, type=2, cpp_type=6, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4,
+      number=5, type=1, cpp_type=5, label=1,
+      has_default_value=False, default_value=float(0),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5,
+      number=6, type=8, cpp_type=7, label=1,
+      has_default_value=False, default_value=False,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=675,
+  serialized_end=794,
+)
+
+_PROFILE = _descriptor.Descriptor(
+  name='Profile',
+  full_name='proto.Profile',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='fileInfo', full_name='proto.Profile.fileInfo', index=0,
+      number=1, type=11, cpp_type=10, label=1,
+      has_default_value=False, default_value=None,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[_PROFILE_FILE, ],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=625,
+  serialized_end=794,
+)
+
+
+_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor(
+  name='GetUsersByStatusInput',
+  full_name='proto.GetUsersByStatusInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='status', full_name='proto.GetUsersByStatusInput.status', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='proto.GetUsersByStatusInput.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=796,
+  serialized_end=848,
+)
+
+
+_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor(
+  name='GetUsersByStatusOutput',
+  full_name='proto.GetUsersByStatusOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='users', full_name='proto.GetUsersByStatusOutput.users', index=0,
+      number=1, type=11, cpp_type=10, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=850,
+  serialized_end=908,
+)
+
+
+_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor(
+  name='AccountHeartbeatOutput',
+  full_name='proto.AccountHeartbeatOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.AccountHeartbeatOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=910,
+  serialized_end=951,
+)
+
+
+_LOGININPUT = _descriptor.Descriptor(
+  name='LoginInput',
+  full_name='proto.LoginInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.LoginInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='password', full_name='proto.LoginInput.password', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=953,
+  serialized_end=998,
+)
+
+
+_USEROUTPUT = _descriptor.Descriptor(
+  name='UserOutput',
+  full_name='proto.UserOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='id', full_name='proto.UserOutput.id', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.UserOutput.email', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='proto.UserOutput.status', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='phone', full_name='proto.UserOutput.phone', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='firstName', full_name='proto.UserOutput.firstName', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='lastName', full_name='proto.UserOutput.lastName', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='createdTime', full_name='proto.UserOutput.createdTime', index=6,
+      number=7, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='namespace', full_name='proto.UserOutput.namespace', index=7,
+      number=8, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='domainName', full_name='proto.UserOutput.domainName', index=8,
+      number=9, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='company', full_name='proto.UserOutput.company', index=9,
+      number=10, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='url', full_name='proto.UserOutput.url', index=10,
+      number=11, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11,
+      number=12, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12,
+      number=13, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1001,
+  serialized_end=1243,
+)
+
+
+_SINGUPINPUT = _descriptor.Descriptor(
+  name='SingupInput',
+  full_name='proto.SingupInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.SingupInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='phone', full_name='proto.SingupInput.phone', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='firstName', full_name='proto.SingupInput.firstName', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='lastName', full_name='proto.SingupInput.lastName', index=3,
+      number=4, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='password', full_name='proto.SingupInput.password', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='company', full_name='proto.SingupInput.company', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1245,
+  serialized_end=1360,
+)
+
+
+_SINGUPOUTPUT = _descriptor.Descriptor(
+  name='SingupOutput',
+  full_name='proto.SingupOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.SingupOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1362,
+  serialized_end=1393,
+)
+
+
+_DELETEUSERINPUT = _descriptor.Descriptor(
+  name='DeleteUserInput',
+  full_name='proto.DeleteUserInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.DeleteUserInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='proto.DeleteUserInput.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1395,
+  serialized_end=1440,
+)
+
+
+_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor(
+  name='UpdateUserStatusInput',
+  full_name='proto.UpdateUserStatusInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.UpdateUserStatusInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='key', full_name='proto.UpdateUserStatusInput.key', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='proto.UpdateUserStatusInput.status', index=2,
+      number=3, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1442,
+  serialized_end=1509,
+)
+
+
+_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor(
+  name='ResendConfirmCodeInput',
+  full_name='proto.ResendConfirmCodeInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.ResendConfirmCodeInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1511,
+  serialized_end=1550,
+)
+
+
+_CONFIRMINPUT = _descriptor.Descriptor(
+  name='ConfirmInput',
+  full_name='proto.ConfirmInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='email', full_name='proto.ConfirmInput.email', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='code', full_name='proto.ConfirmInput.code', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1552,
+  serialized_end=1595,
+)
+
+
+_DPHEARTBEATOUTPUT = _descriptor.Descriptor(
+  name='DPHeartbeatOutput',
+  full_name='proto.DPHeartbeatOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.DPHeartbeatOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1597,
+  serialized_end=1633,
+)
+
+
+_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor(
+  name='DPGetPhysicalDisksInput',
+  full_name='proto.DPGetPhysicalDisksInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3,
+      number=4, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1635,
+  serialized_end=1745,
+)
+
+
+_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor(
+  name='DPGetDisksPredictionInput',
+  full_name='proto.DPGetDisksPredictionInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2,
+      number=3, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3,
+      number=4, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4,
+      number=5, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5,
+      number=6, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1747,
+  serialized_end=1870,
+)
+
+
+_DPBINARYOUTPUT = _descriptor.Descriptor(
+  name='DPBinaryOutput',
+  full_name='proto.DPBinaryOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='data', full_name='proto.DPBinaryOutput.data', index=0,
+      number=1, type=12, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b(""),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1872,
+  serialized_end=1902,
+)
+
+
+_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor(
+  name='CollectionHeartbeatOutput',
+  full_name='proto.CollectionHeartbeatOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0,
+      number=1, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1904,
+  serialized_end=1948,
+)
+
+
+_POSTMETRICSINPUT = _descriptor.Descriptor(
+  name='PostMetricsInput',
+  full_name='proto.PostMetricsInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='points', full_name='proto.PostMetricsInput.points', index=0,
+      number=1, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1950,
+  serialized_end=1984,
+)
+
+
+_POSTDBRELAYINPUT = _descriptor.Descriptor(
+  name='PostDBRelayInput',
+  full_name='proto.PostDBRelayInput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0,
+      number=1, type=9, cpp_type=9, label=3,
+      has_default_value=False, default_value=[],
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=1986,
+  serialized_end=2018,
+)
+
+
+_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor(
+  name='CollectionMessageOutput',
+  full_name='proto.CollectionMessageOutput',
+  filename=None,
+  file=DESCRIPTOR,
+  containing_type=None,
+  fields=[
+    _descriptor.FieldDescriptor(
+      name='status', full_name='proto.CollectionMessageOutput.status', index=0,
+      number=1, type=3, cpp_type=2, label=1,
+      has_default_value=False, default_value=0,
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+    _descriptor.FieldDescriptor(
+      name='message', full_name='proto.CollectionMessageOutput.message', index=1,
+      number=2, type=9, cpp_type=9, label=1,
+      has_default_value=False, default_value=_b("").decode('utf-8'),
+      message_type=None, enum_type=None, containing_type=None,
+      is_extension=False, extension_scope=None,
+      options=None, file=DESCRIPTOR),
+  ],
+  extensions=[
+  ],
+  nested_types=[],
+  enum_types=[
+  ],
+  options=None,
+  is_extendable=False,
+  syntax='proto3',
+  extension_ranges=[],
+  oneofs=[
+  ],
+  serialized_start=2020,
+  serialized_end=2078,
+)
+
+_TESTINPUT.fields_by_name['people'].message_type = _PERSON
+_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT
+_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY
+_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON
+_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE
+_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
+_PERSON_PHONENUMBER.containing_type = _PERSON
+_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER
+_PERSON_PHONETYPE.containing_type = _PERSON
+_PROFILE_FILE.containing_type = _PROFILE
+_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE
+_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT
+DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
+DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT
+DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT
+DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT
+DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT
+DESCRIPTOR.message_types_by_name['Person'] = _PERSON
+DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
+DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT
+DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT
+DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT
+DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT
+DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT
+DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT
+DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT
+DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT
+DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT
+DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT
+DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT
+DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT
+DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT
+DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT
+DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT
+DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT
+DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT
+_sym_db.RegisterFileDescriptor(DESCRIPTOR)
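The descriptors above are protoc output: each _descriptor.Descriptor mirrors
one message of the original .proto schema, and RegisterFileDescriptor
publishes them through the default symbol database. A minimal inspection
sketch, assuming the module is importable as client_pb2 (the name this commit
installs it under):

    import client_pb2
    for name, desc in client_pb2.DESCRIPTOR.message_types_by_name.items():
        print(name, [f.name for f in desc.fields])
    # e.g. UserOutput ['id', 'email', 'status', 'phone', 'firstName', ...]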
+
+Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
+  DESCRIPTOR = _EMPTY,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.Empty)
+  ))
+_sym_db.RegisterMessage(Empty)
+
+GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict(
+  DESCRIPTOR = _GENERALMSGOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput)
+  ))
+_sym_db.RegisterMessage(GeneralMsgOutput)
+
+GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict(
+  DESCRIPTOR = _GENERALHEARTBEATOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput)
+  ))
+_sym_db.RegisterMessage(GeneralHeartbeatOutput)
+
+PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict(
+  DESCRIPTOR = _PINGOUTOUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.PingOutout)
+  ))
+_sym_db.RegisterMessage(PingOutout)
+
+TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict(
+  DESCRIPTOR = _TESTINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.TestInput)
+  ))
+_sym_db.RegisterMessage(TestInput)
+
+TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict(
+
+  MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict(
+    DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY,
+    __module__ = 'mainServer_pb2'
+    # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry)
+    ))
+  ,
+  DESCRIPTOR = _TESTOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.TestOutput)
+  ))
+_sym_db.RegisterMessage(TestOutput)
+_sym_db.RegisterMessage(TestOutput.MapValueEntry)
+
+Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
+
+  PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict(
+    DESCRIPTOR = _PERSON_PHONENUMBER,
+    __module__ = 'mainServer_pb2'
+    # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber)
+    ))
+  ,
+  DESCRIPTOR = _PERSON,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.Person)
+  ))
+_sym_db.RegisterMessage(Person)
+_sym_db.RegisterMessage(Person.PhoneNumber)
+
+Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
+
+  File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict(
+    DESCRIPTOR = _PROFILE_FILE,
+    __module__ = 'mainServer_pb2'
+    # @@protoc_insertion_point(class_scope:proto.Profile.File)
+    ))
+  ,
+  DESCRIPTOR = _PROFILE,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.Profile)
+  ))
+_sym_db.RegisterMessage(Profile)
+_sym_db.RegisterMessage(Profile.File)
+
+GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict(
+  DESCRIPTOR = _GETUSERSBYSTATUSINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput)
+  ))
+_sym_db.RegisterMessage(GetUsersByStatusInput)
+
+GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict(
+  DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput)
+  ))
+_sym_db.RegisterMessage(GetUsersByStatusOutput)
+
+AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict(
+  DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput)
+  ))
+_sym_db.RegisterMessage(AccountHeartbeatOutput)
+
+LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict(
+  DESCRIPTOR = _LOGININPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.LoginInput)
+  ))
+_sym_db.RegisterMessage(LoginInput)
+
+UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict(
+  DESCRIPTOR = _USEROUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.UserOutput)
+  ))
+_sym_db.RegisterMessage(UserOutput)
+
+SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict(
+  DESCRIPTOR = _SINGUPINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.SingupInput)
+  ))
+_sym_db.RegisterMessage(SingupInput)
+
+SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict(
+  DESCRIPTOR = _SINGUPOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.SingupOutput)
+  ))
+_sym_db.RegisterMessage(SingupOutput)
+
+DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict(
+  DESCRIPTOR = _DELETEUSERINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.DeleteUserInput)
+  ))
+_sym_db.RegisterMessage(DeleteUserInput)
+
+UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict(
+  DESCRIPTOR = _UPDATEUSERSTATUSINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput)
+  ))
+_sym_db.RegisterMessage(UpdateUserStatusInput)
+
+ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict(
+  DESCRIPTOR = _RESENDCONFIRMCODEINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput)
+  ))
+_sym_db.RegisterMessage(ResendConfirmCodeInput)
+
+ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict(
+  DESCRIPTOR = _CONFIRMINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.ConfirmInput)
+  ))
+_sym_db.RegisterMessage(ConfirmInput)
+
+DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict(
+  DESCRIPTOR = _DPHEARTBEATOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput)
+  ))
+_sym_db.RegisterMessage(DPHeartbeatOutput)
+
+DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict(
+  DESCRIPTOR = _DPGETPHYSICALDISKSINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput)
+  ))
+_sym_db.RegisterMessage(DPGetPhysicalDisksInput)
+
+DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict(
+  DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput)
+  ))
+_sym_db.RegisterMessage(DPGetDisksPredictionInput)
+
+DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict(
+  DESCRIPTOR = _DPBINARYOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput)
+  ))
+_sym_db.RegisterMessage(DPBinaryOutput)
+
+CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict(
+  DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput)
+  ))
+_sym_db.RegisterMessage(CollectionHeartbeatOutput)
+
+PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict(
+  DESCRIPTOR = _POSTMETRICSINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.PostMetricsInput)
+  ))
+_sym_db.RegisterMessage(PostMetricsInput)
+
+PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict(
+  DESCRIPTOR = _POSTDBRELAYINPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput)
+  ))
+_sym_db.RegisterMessage(PostDBRelayInput)
+
+CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict(
+  DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT,
+  __module__ = 'mainServer_pb2'
+  # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput)
+  ))
+_sym_db.RegisterMessage(CollectionMessageOutput)
+
+
+_TESTOUTPUT_MAPVALUEENTRY.has_options = True
+_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
+
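The two option lines above mark TestOutput.MapValueEntry as a synthetic map
entry ('8\001' decodes to MessageOptions.map_entry = true, which is how proto3
maps are represented). With every class registered, the messages behave like
ordinary proto3 types; a round-trip sketch using fields visible in the
descriptors above (the values are illustrative):

    import client_pb2
    req = client_pb2.DPGetPhysicalDisksInput(hostIds='host-0', limit=10, page=1)
    wire = req.SerializeToString()
    assert client_pb2.DPGetPhysicalDisksInput.FromString(wire).limit == 10

Note that the 'from' field of the same message collides with a Python keyword,
so it has to be set with setattr(req, 'from', value) rather than as a
constructor argument.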
+_GENERAL = _descriptor.ServiceDescriptor(
+  name='General',
+  full_name='proto.General',
+  file=DESCRIPTOR,
+  index=0,
+  options=None,
+  serialized_start=2081,
+  serialized_end=2342,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='GeneralHeartbeat',
+    full_name='proto.General.GeneralHeartbeat',
+    index=0,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_GENERALHEARTBEATOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Ping',
+    full_name='proto.General.Ping',
+    index=1,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_PINGOUTOUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Test',
+    full_name='proto.General.Test',
+    index=2,
+    containing_service=None,
+    input_type=_TESTINPUT,
+    output_type=_TESTOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_GENERAL)
+
+DESCRIPTOR.services_by_name['General'] = _GENERAL
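The _b('\202\323\344\223\002...') blobs in each MethodDescriptor are
serialized MethodOptions carrying google.api.http annotations; the readable
tails are the REST routes (e.g. GET /apis/v2/general/ping) that an HTTP
gateway in front of the gRPC service could expose. A hypothetical REST-side
equivalent of the Ping RPC, assuming such a gateway at gateway.example
(Python 3; both the host and the scheme are illustrative):

    import urllib.request
    with urllib.request.urlopen('https://gateway.example/apis/v2/general/ping') as r:
        print(r.read())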
+
+
+_ACCOUNT = _descriptor.ServiceDescriptor(
+  name='Account',
+  full_name='proto.Account',
+  file=DESCRIPTOR,
+  index=1,
+  options=None,
+  serialized_start=2345,
+  serialized_end=3149,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='AccountHeartbeat',
+    full_name='proto.Account.AccountHeartbeat',
+    index=0,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_ACCOUNTHEARTBEATOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Login',
+    full_name='proto.Account.Login',
+    index=1,
+    containing_service=None,
+    input_type=_LOGININPUT,
+    output_type=_USEROUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Signup',
+    full_name='proto.Account.Signup',
+    index=2,
+    containing_service=None,
+    input_type=_SINGUPINPUT,
+    output_type=_SINGUPOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='ResendConfirmCode',
+    full_name='proto.Account.ResendConfirmCode',
+    index=3,
+    containing_service=None,
+    input_type=_RESENDCONFIRMCODEINPUT,
+    output_type=_GENERALMSGOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='Confirm',
+    full_name='proto.Account.Confirm',
+    index=4,
+    containing_service=None,
+    input_type=_CONFIRMINPUT,
+    output_type=_GENERALMSGOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='GetUsersByStatus',
+    full_name='proto.Account.GetUsersByStatus',
+    index=5,
+    containing_service=None,
+    input_type=_GETUSERSBYSTATUSINPUT,
+    output_type=_GETUSERSBYSTATUSOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='DeleteUser',
+    full_name='proto.Account.DeleteUser',
+    index=6,
+    containing_service=None,
+    input_type=_DELETEUSERINPUT,
+    output_type=_GENERALMSGOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='UpdateUserStatus',
+    full_name='proto.Account.UpdateUserStatus',
+    index=7,
+    containing_service=None,
+    input_type=_UPDATEUSERSTATUSINPUT,
+    output_type=_GENERALMSGOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_ACCOUNT)
+
+DESCRIPTOR.services_by_name['Account'] = _ACCOUNT
+
+
+_DISKPROPHET = _descriptor.ServiceDescriptor(
+  name='Diskprophet',
+  full_name='proto.Diskprophet',
+  file=DESCRIPTOR,
+  index=2,
+  options=None,
+  serialized_start=3152,
+  serialized_end=3487,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='DPHeartbeat',
+    full_name='proto.Diskprophet.DPHeartbeat',
+    index=0,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_DPHEARTBEATOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='DPGetPhysicalDisks',
+    full_name='proto.Diskprophet.DPGetPhysicalDisks',
+    index=1,
+    containing_service=None,
+    input_type=_DPGETPHYSICALDISKSINPUT,
+    output_type=_DPBINARYOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='DPGetDisksPrediction',
+    full_name='proto.Diskprophet.DPGetDisksPrediction',
+    index=2,
+    containing_service=None,
+    input_type=_DPGETDISKSPREDICTIONINPUT,
+    output_type=_DPBINARYOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_DISKPROPHET)
+
+DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET
+
+
+_COLLECTION = _descriptor.ServiceDescriptor(
+  name='Collection',
+  full_name='proto.Collection',
+  file=DESCRIPTOR,
+  index=3,
+  options=None,
+  serialized_start=3490,
+  serialized_end=3837,
+  methods=[
+  _descriptor.MethodDescriptor(
+    name='CollectionHeartbeat',
+    full_name='proto.Collection.CollectionHeartbeat',
+    index=0,
+    containing_service=None,
+    input_type=_EMPTY,
+    output_type=_COLLECTIONHEARTBEATOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='PostDBRelay',
+    full_name='proto.Collection.PostDBRelay',
+    index=1,
+    containing_service=None,
+    input_type=_POSTDBRELAYINPUT,
+    output_type=_COLLECTIONMESSAGEOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')),
+  ),
+  _descriptor.MethodDescriptor(
+    name='PostMetrics',
+    full_name='proto.Collection.PostMetrics',
+    index=2,
+    containing_service=None,
+    input_type=_POSTMETRICSINPUT,
+    output_type=_COLLECTIONMESSAGEOUTPUT,
+    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')),
+  ),
+])
+_sym_db.RegisterServiceDescriptor(_COLLECTION)
+
+DESCRIPTOR.services_by_name['Collection'] = _COLLECTION
+
+# @@protoc_insertion_point(module_scope)
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py
new file mode 100644 (file)
index 0000000..c1c3217
--- /dev/null
@@ -0,0 +1,395 @@
+# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
+import grpc
+
+import client_pb2 as mainServer__pb2
+
+
+class GeneralStub(object):
+  """-------------------------- General -------------------------------------
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.GeneralHeartbeat = channel.unary_unary(
+        '/proto.General/GeneralHeartbeat',
+        request_serializer=mainServer__pb2.Empty.SerializeToString,
+        response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString,
+        )
+    self.Ping = channel.unary_unary(
+        '/proto.General/Ping',
+        request_serializer=mainServer__pb2.Empty.SerializeToString,
+        response_deserializer=mainServer__pb2.PingOutout.FromString,
+        )
+    self.Test = channel.unary_unary(
+        '/proto.General/Test',
+        request_serializer=mainServer__pb2.TestInput.SerializeToString,
+        response_deserializer=mainServer__pb2.TestOutput.FromString,
+        )
+
+
+class GeneralServicer(object):
+  """-------------------------- General -------------------------------------
+  """
+
+  def GeneralHeartbeat(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Ping(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Test(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_GeneralServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler(
+          servicer.GeneralHeartbeat,
+          request_deserializer=mainServer__pb2.Empty.FromString,
+          response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString,
+      ),
+      'Ping': grpc.unary_unary_rpc_method_handler(
+          servicer.Ping,
+          request_deserializer=mainServer__pb2.Empty.FromString,
+          response_serializer=mainServer__pb2.PingOutout.SerializeToString,
+      ),
+      'Test': grpc.unary_unary_rpc_method_handler(
+          servicer.Test,
+          request_deserializer=mainServer__pb2.TestInput.FromString,
+          response_serializer=mainServer__pb2.TestOutput.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'proto.General', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
+
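The stub is the client half of the service: each attribute wraps one
unary-unary RPC with the matching serializer pair (the mainServer__pb2 alias
suggests the schema file was named mainServer.proto before the generated
module was renamed to client_pb2). A minimal client sketch; the address is
illustrative, and the real endpoint and channel settings live elsewhere in
this commit:

    import grpc
    import client_pb2
    import client_pb2_grpc

    channel = grpc.insecure_channel('localhost:50051')  # illustrative address
    stub = client_pb2_grpc.GeneralStub(channel)
    print(stub.Ping(client_pb2.Empty(), timeout=30))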
+
+class AccountStub(object):
+  """-------------------------- SERVER ACCOUNT ------------------------------
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.AccountHeartbeat = channel.unary_unary(
+        '/proto.Account/AccountHeartbeat',
+        request_serializer=mainServer__pb2.Empty.SerializeToString,
+        response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString,
+        )
+    self.Login = channel.unary_unary(
+        '/proto.Account/Login',
+        request_serializer=mainServer__pb2.LoginInput.SerializeToString,
+        response_deserializer=mainServer__pb2.UserOutput.FromString,
+        )
+    self.Signup = channel.unary_unary(
+        '/proto.Account/Signup',
+        request_serializer=mainServer__pb2.SingupInput.SerializeToString,
+        response_deserializer=mainServer__pb2.SingupOutput.FromString,
+        )
+    self.ResendConfirmCode = channel.unary_unary(
+        '/proto.Account/ResendConfirmCode',
+        request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString,
+        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
+        )
+    self.Confirm = channel.unary_unary(
+        '/proto.Account/Confirm',
+        request_serializer=mainServer__pb2.ConfirmInput.SerializeToString,
+        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
+        )
+    self.GetUsersByStatus = channel.unary_unary(
+        '/proto.Account/GetUsersByStatus',
+        request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString,
+        response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString,
+        )
+    self.DeleteUser = channel.unary_unary(
+        '/proto.Account/DeleteUser',
+        request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString,
+        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
+        )
+    self.UpdateUserStatus = channel.unary_unary(
+        '/proto.Account/UpdateUserStatus',
+        request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString,
+        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
+        )
+
+
+class AccountServicer(object):
+  """-------------------------- SERVER ACCOUNT ------------------------------
+  """
+
+  def AccountHeartbeat(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Login(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Signup(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def ResendConfirmCode(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def Confirm(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def GetUsersByStatus(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def DeleteUser(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def UpdateUserStatus(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_AccountServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'AccountHeartbeat': grpc.unary_unary_rpc_method_handler(
+          servicer.AccountHeartbeat,
+          request_deserializer=mainServer__pb2.Empty.FromString,
+          response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString,
+      ),
+      'Login': grpc.unary_unary_rpc_method_handler(
+          servicer.Login,
+          request_deserializer=mainServer__pb2.LoginInput.FromString,
+          response_serializer=mainServer__pb2.UserOutput.SerializeToString,
+      ),
+      'Signup': grpc.unary_unary_rpc_method_handler(
+          servicer.Signup,
+          request_deserializer=mainServer__pb2.SingupInput.FromString,
+          response_serializer=mainServer__pb2.SingupOutput.SerializeToString,
+      ),
+      'ResendConfirmCode': grpc.unary_unary_rpc_method_handler(
+          servicer.ResendConfirmCode,
+          request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString,
+          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
+      ),
+      'Confirm': grpc.unary_unary_rpc_method_handler(
+          servicer.Confirm,
+          request_deserializer=mainServer__pb2.ConfirmInput.FromString,
+          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
+      ),
+      'GetUsersByStatus': grpc.unary_unary_rpc_method_handler(
+          servicer.GetUsersByStatus,
+          request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString,
+          response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString,
+      ),
+      'DeleteUser': grpc.unary_unary_rpc_method_handler(
+          servicer.DeleteUser,
+          request_deserializer=mainServer__pb2.DeleteUserInput.FromString,
+          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
+      ),
+      'UpdateUserStatus': grpc.unary_unary_rpc_method_handler(
+          servicer.UpdateUserStatus,
+          request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString,
+          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'proto.Account', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
+
+
+class DiskprophetStub(object):
+  """------------------------ SERVER DISKPROPHET ---------------------------
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.DPHeartbeat = channel.unary_unary(
+        '/proto.Diskprophet/DPHeartbeat',
+        request_serializer=mainServer__pb2.Empty.SerializeToString,
+        response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString,
+        )
+    self.DPGetPhysicalDisks = channel.unary_unary(
+        '/proto.Diskprophet/DPGetPhysicalDisks',
+        request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString,
+        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
+        )
+    self.DPGetDisksPrediction = channel.unary_unary(
+        '/proto.Diskprophet/DPGetDisksPrediction',
+        request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString,
+        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
+        )
+
+
+class DiskprophetServicer(object):
+  """------------------------ SERVER DISKPROPHET ---------------------------
+  """
+
+  def DPHeartbeat(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def DPGetPhysicalDisks(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def DPGetDisksPrediction(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_DiskprophetServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'DPHeartbeat': grpc.unary_unary_rpc_method_handler(
+          servicer.DPHeartbeat,
+          request_deserializer=mainServer__pb2.Empty.FromString,
+          response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString,
+      ),
+      'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler(
+          servicer.DPGetPhysicalDisks,
+          request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString,
+          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
+      ),
+      'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler(
+          servicer.DPGetDisksPrediction,
+          request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString,
+          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'proto.Diskprophet', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
+
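The servicer base class is the server half; the generated bodies all answer
UNIMPLEMENTED, so a real implementation subclasses it and overrides the
methods. A sketch serving only DPHeartbeat (DPHeartbeatOutput.message is a
string field per its descriptor above; the port and pool size are
illustrative):

    from concurrent import futures
    import grpc
    import client_pb2
    import client_pb2_grpc

    class DemoDiskprophet(client_pb2_grpc.DiskprophetServicer):
        def DPHeartbeat(self, request, context):
            return client_pb2.DPHeartbeatOutput(message='ok')

    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    client_pb2_grpc.add_DiskprophetServicer_to_server(DemoDiskprophet(), server)
    server.add_insecure_port('[::]:50051')
    server.start()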
+
+class CollectionStub(object):
+  """------------------------ SERVER Collection ---------------------------
+
+  """
+
+  def __init__(self, channel):
+    """Constructor.
+
+    Args:
+      channel: A grpc.Channel.
+    """
+    self.CollectionHeartbeat = channel.unary_unary(
+        '/proto.Collection/CollectionHeartbeat',
+        request_serializer=mainServer__pb2.Empty.SerializeToString,
+        response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString,
+        )
+    self.PostDBRelay = channel.unary_unary(
+        '/proto.Collection/PostDBRelay',
+        request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString,
+        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
+        )
+    self.PostMetrics = channel.unary_unary(
+        '/proto.Collection/PostMetrics',
+        request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString,
+        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
+        )
+
+
+class CollectionServicer(object):
+  """------------------------ SERVER Collection ---------------------------
+
+  """
+
+  def CollectionHeartbeat(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def PostDBRelay(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+  def PostMetrics(self, request, context):
+    # missing associated documentation comment in .proto file
+    pass
+    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
+    context.set_details('Method not implemented!')
+    raise NotImplementedError('Method not implemented!')
+
+
+def add_CollectionServicer_to_server(servicer, server):
+  rpc_method_handlers = {
+      'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler(
+          servicer.CollectionHeartbeat,
+          request_deserializer=mainServer__pb2.Empty.FromString,
+          response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString,
+      ),
+      'PostDBRelay': grpc.unary_unary_rpc_method_handler(
+          servicer.PostDBRelay,
+          request_deserializer=mainServer__pb2.PostDBRelayInput.FromString,
+          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
+      ),
+      'PostMetrics': grpc.unary_unary_rpc_method_handler(
+          servicer.PostMetrics,
+          request_deserializer=mainServer__pb2.PostMetricsInput.FromString,
+          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
+      ),
+  }
+  generic_handler = grpc.method_handlers_generic_handler(
+      'proto.Collection', rpc_method_handlers)
+  server.add_generic_rpc_handlers((generic_handler,))
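End of the generated bindings. For completeness, a sketch of how a collector
could push metrics through the Collection service; PostMetricsInput.points is
a repeated string per its descriptor, and the line-protocol payload shown is
purely illustrative:

    stub = client_pb2_grpc.CollectionStub(channel)  # channel as in the earlier sketch
    out = stub.PostMetrics(client_pb2.PostMetricsInput(
        points=['sai_disk,disk_id=0 near_failure="Good"']))
    print(out.status, out.message)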
diff --git a/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py
new file mode 100644 (file)
index 0000000..41997a3
--- /dev/null
@@ -0,0 +1,461 @@
+"""\r
+Ceph database API\r
+\r
+"""\r
+from __future__ import absolute_import\r
+\r
+import json\r
+import rbd\r
+from mgr_module import CommandResult\r
+\r
+GB = 1024 * 1024 * 1024\r
+\r
+\r
+RBD_FEATURES_NAME_MAPPING = {\r
+    rbd.RBD_FEATURE_LAYERING: 'layering',\r
+    rbd.RBD_FEATURE_STRIPINGV2: 'striping',\r
+    rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock',\r
+    rbd.RBD_FEATURE_OBJECT_MAP: 'object-map',\r
+    rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff',\r
+    rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten',\r
+    rbd.RBD_FEATURE_JOURNALING: 'journaling',\r
+    rbd.RBD_FEATURE_DATA_POOL: 'data-pool',\r
+    rbd.RBD_FEATURE_OPERATIONS: 'operations',\r
+}\r
+\r
+\r
+def differentiate(data1, data2):\r
+    """\r
+    # >>> times = [0, 2]\r
+    # >>> values = [100, 101]\r
+    # >>> differentiate(*zip(times, values))\r
+    0.5\r
+    """\r
+    return (data2[1] - data1[1]) / float(data2[0] - data1[0])\r
+\r
+\r
+class ClusterAPI(object):\r
+\r
+    def __init__(self, module_obj):\r
+        self.module = module_obj\r
+\r
+    @staticmethod\r
+    def format_bitmask(features):\r
+        """\r
+        Formats the bitmask:\r
+        # >>> format_bitmask(45)\r
+        ['deep-flatten', 'exclusive-lock', 'layering', 'object-map']\r
+        """\r
+        names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items()\r
+                 if key & features == key]\r
+        return sorted(names)\r
+\r
+    def _open_connection(self, pool_name='device_health_metrics'):\r
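+        # Open an ioctx on pool_name, creating the pool on first use with\r
+        # pg_num=1 and application 'mgr_devicehealth' (see below).\r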
+        pools = self.module.rados.list_pools()\r
+        is_pool = False\r
+        for pool in pools:\r
+            if pool == pool_name:\r
+                is_pool = True\r
+                break\r
+        if not is_pool:\r
+            self.module.log.debug('create %s pool' % pool_name)\r
+            # create pool\r
+            result = CommandResult('')\r
+            self.module.send_command(result, 'mon', '', json.dumps({\r
+                'prefix': 'osd pool create',\r
+                'format': 'json',\r
+                'pool': pool_name,\r
+                'pg_num': 1,\r
+            }), '')\r
+            r, outb, outs = result.wait()\r
+            assert r == 0\r
+\r
+            # set pool application\r
+            result = CommandResult('')\r
+            self.module.send_command(result, 'mon', '', json.dumps({\r
+                'prefix': 'osd pool application enable',\r
+                'format': 'json',\r
+                'pool': pool_name,\r
+                'app': 'mgr_devicehealth',\r
+            }), '')\r
+            r, outb, outs = result.wait()\r
+            assert r == 0\r
+\r
+        ioctx = self.module.rados.open_ioctx(pool_name)\r
+        return ioctx\r
+\r
+    @classmethod\r
+    def _rbd_disk_usage(cls, image, snaps, whole_object=True):\r
+        class DUCallback(object):\r
+            def __init__(self):\r
+                self.used_size = 0\r
+\r
+            def __call__(self, offset, length, exists):\r
+                if exists:\r
+                    self.used_size += length\r
+        snap_map = {}\r
+        prev_snap = None\r
+        total_used_size = 0\r
+        for _, size, name in snaps:\r
+            image.set_snap(name)\r
+            du_callb = DUCallback()\r
+            image.diff_iterate(0, size, prev_snap, du_callb,\r
+                               whole_object=whole_object)\r
+            snap_map[name] = du_callb.used_size\r
+            total_used_size += du_callb.used_size\r
+            prev_snap = name\r
+        return total_used_size, snap_map\r
+\r
+    def _rbd_image(self, ioctx, pool_name, image_name):\r
+        with rbd.Image(ioctx, image_name) as img:\r
+            stat = img.stat()\r
+            stat['name'] = image_name\r
+            stat['id'] = img.id()\r
+            stat['pool_name'] = pool_name\r
+            features = img.features()\r
+            stat['features'] = features\r
+            stat['features_name'] = self.format_bitmask(features)\r
+\r
+            # the following keys are deprecated\r
+            del stat['parent_pool']\r
+            del stat['parent_name']\r
+            stat['timestamp'] = '{}Z'.format(img.create_timestamp()\r
+                                             .isoformat())\r
+            stat['stripe_count'] = img.stripe_count()\r
+            stat['stripe_unit'] = img.stripe_unit()\r
+            stat['data_pool'] = None\r
+            try:\r
+                parent_info = img.parent_info()\r
+                stat['parent'] = {\r
+                    'pool_name': parent_info[0],\r
+                    'image_name': parent_info[1],\r
+                    'snap_name': parent_info[2]\r
+                }\r
+            except rbd.ImageNotFound:\r
+                # no parent image\r
+                stat['parent'] = None\r
+            # snapshots\r
+            stat['snapshots'] = []\r
+            for snap in img.list_snaps():\r
+                snap['timestamp'] = '{}Z'.format(\r
+                    img.get_snap_timestamp(snap['id']).isoformat())\r
+                snap['is_protected'] = img.is_protected_snap(snap['name'])\r
+                snap['used_bytes'] = None\r
+                snap['children'] = []\r
+                img.set_snap(snap['name'])\r
+                for child_pool_name, child_image_name in img.list_children():\r
+                    snap['children'].append({\r
+                        'pool_name': child_pool_name,\r
+                        'image_name': child_image_name\r
+                    })\r
+                stat['snapshots'].append(snap)\r
+            # disk usage\r
+            if 'fast-diff' in stat['features_name']:\r
+                snaps = [(s['id'], s['size'], s['name'])\r
+                         for s in stat['snapshots']]\r
+                snaps.sort(key=lambda s: s[0])\r
+                snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)]\r
+                total_prov_bytes, snaps_prov_bytes = self._rbd_disk_usage(\r
+                    img, snaps, True)\r
+                stat['total_disk_usage'] = total_prov_bytes\r
+                for snap, prov_bytes in snaps_prov_bytes.items():\r
+                    if snap is None:\r
+                        stat['disk_usage'] = prov_bytes\r
+                        continue\r
+                    for ss in stat['snapshots']:\r
+                        if ss['name'] == snap:\r
+                            ss['disk_usage'] = prov_bytes\r
+                            break\r
+            else:\r
+                stat['total_disk_usage'] = None\r
+                stat['disk_usage'] = None\r
+            return stat\r
+\r
+    def get_rbd_list(self, pool_name=None):\r
+        if pool_name:\r
+            pools = [pool_name]\r
+        else:\r
+            pools = []\r
+            for data in self.get_osd_pools():\r
+                pools.append(data['pool_name'])\r
+        result = []\r
+        for pool in pools:\r
+            rbd_inst = rbd.RBD()\r
+            with self._open_connection(str(pool)) as ioctx:\r
+                names = rbd_inst.list(ioctx)\r
+                for name in names:\r
+                    try:\r
+                        stat = self._rbd_image(ioctx, pool, name)\r
+                    except rbd.ImageNotFound:\r
+                        continue\r
+                    result.append(stat)\r
+        return result\r
+\r
+    def get_object_pg_info(self, pool_name, object_name):\r
+        result = CommandResult('')\r
+        data_json = {}\r
+        self.module.send_command(\r
+            result, 'mon', '', json.dumps({\r
+                'prefix': 'osd map',\r
+                'format': 'json',\r
+                'pool': pool_name,\r
+                'object': object_name,\r
+            }), '')\r
+        ret, outb, outs = result.wait()\r
+        try:\r
+            if outb:\r
+                data_json = json.loads(outb)\r
+            else:\r
+                self.module.log.error('unable to get %s pg info' % pool_name)\r
+        except Exception as e:\r
+            self.module.log.error(\r
+                'unable to get %s pg, error: %s' % (pool_name, str(e)))\r
+        return data_json\r
+\r
+    @staticmethod\r
+    def _list_objects(ioctx, image_id):\r
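+        # RBD data objects are named like 'rbd_data.<image_id>.<object_no>',\r
+        # so matching on the second dot-separated field filters by image.\r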
+        objects = []\r
+        object_iterator = ioctx.list_objects()\r
+        while True:\r
+            try:\r
+                rados_object = object_iterator.next()\r
+                if image_id is None:\r
+                    objects.append(str(rados_object.key))\r
+                else:\r
+                    v = str(rados_object.key).split('.')\r
+                    if len(v) >= 2 and v[1] == image_id:\r
+                        objects.append(str(rados_object.key))\r
+            except StopIteration:\r
+                break\r
+        return objects\r
+\r
+    def get_rbd_info(self, pool_name, image_name):\r
+        with self._open_connection(pool_name) as ioctx:\r
+            try:\r
+                stat = self._rbd_image(ioctx, pool_name, image_name)\r
+                if stat.get('id'):\r
+                    objects = self._list_objects(ioctx, stat.get('id'))\r
+                    if objects:\r
+                        stat['objects'] = objects\r
+                        stat['pgs'] = list()\r
+                    for obj_name in objects:\r
+                        pgs_data = self.get_object_pg_info(pool_name, obj_name)\r
+                        stat['pgs'].extend([pgs_data])\r
+            except rbd.ImageNotFound:\r
+                stat = {}\r
+        return stat\r
+\r
+    def get_pool_objects(self, pool_name, image_id=None):\r
+        # list_objects\r
+        try:\r
+            with self._open_connection(pool_name) as ioctx:\r
+                objects = self._list_objects(ioctx, image_id)\r
+        except Exception:\r
+            objects = []\r
+        return objects\r
+\r
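+    # Sizes returned by get_ceph_df_state are in GB, e.g. (illustrative):\r
+    # {'total_size': 100.0, 'avail_size': 60.0,\r
+    #  'raw_used_size': 40.0, 'used_percent': 40.0}\r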
+    def get_ceph_df_state(self):\r
+        ceph_stats = self.module.get('df').get('stats', {})\r
+        if not ceph_stats:\r
+            return {'total_size': 0, 'avail_size': 0, 'raw_used_size': 0, 'used_percent': 0}\r
+        total_size = round(float(ceph_stats.get('total_bytes', 0)) / GB, 2)\r
+        avail_size = round(float(ceph_stats.get('total_avail_bytes', 0)) / GB, 2)\r
+        raw_used_size = round(float(ceph_stats.get('total_used_bytes', 0)) / GB, 2)\r
+        if total_size:\r
+            raw_used_percent = round(float(raw_used_size) / float(total_size) * 100, 2)\r
+        else:\r
+            raw_used_percent = 0\r
+        return {'total_size': total_size, 'avail_size': avail_size, 'raw_used_size': raw_used_size,\r
+                'used_percent': raw_used_percent}\r
+\r
+    def get_osd_metadata(self, osd_id=None):\r
+        if osd_id is not None:\r
+            return self.module.get('osd_metadata')[str(osd_id)]\r
+        return self.module.get('osd_metadata')\r
+\r
+    def get_mgr_metadata(self, mgr_id):\r
+        return self.module.get_metadata('mgr', mgr_id)\r
+\r
+    def get_osd_epoch(self):\r
+        return self.module.get('osd_map').get('epoch', 0)\r
+\r
+    def get_osds(self):\r
+        return self.module.get('osd_map').get('osds', [])\r
+\r
+    def get_max_osd(self):\r
+        return self.module.get('osd_map').get('max_osd', '')\r
+\r
+    def get_osd_pools(self):\r
+        return self.module.get('osd_map').get('pools', [])\r
+\r
+    def get_pool_bytes_used(self, pool_id):\r
+        bytes_used = None\r
+        pools = self.module.get('df').get('pools', [])\r
+        for pool in pools:\r
+            if pool_id == pool['id']:\r
+                bytes_used = pool['stats']['bytes_used']\r
+        return bytes_used\r
+\r
+    def get_cluster_id(self):\r
+        return self.module.get('mon_map').get('fsid')\r
+\r
+    def get_health_status(self):\r
+        health = json.loads(self.module.get('health')['json'])\r
+        return health.get('status')\r
+\r
+    def get_health_checks(self):\r
+        health = json.loads(self.module.get('health')['json'])\r
+        if health.get('checks'):\r
+            message = ''\r
+            checks = health['checks']\r
+            for key in checks.keys():\r
+                if message:\r
+                    message += ';'\r
+                if checks[key].get('summary', {}).get('message', ''):\r
+                    message += checks[key]['summary']['message']\r
+            return message\r
+        else:\r
+            return ''\r
+\r
+    def get_mons(self):\r
+        return self.module.get('mon_map').get('mons', [])\r
+\r
+    def get_mon_status(self):\r
+        mon_status = json.loads(self.module.get('mon_status')['json'])\r
+        return mon_status\r
+\r
+    def get_osd_smart(self, osd_id, device_id=None):\r
+        osd_devices = []\r
+        osd_smart = {}\r
+        devices = self.module.get('devices')\r
+        for dev in devices.get('devices', []):\r
+            osd = ''\r
+            daemons = dev.get('daemons', [])\r
+            for daemon in daemons:\r
+                if daemon[4:] != str(osd_id):\r
+                    continue\r
+                osd = daemon\r
+            if not osd:\r
+                continue\r
+            if dev.get('devid') and dev.get('devid') not in osd_devices:\r
+                osd_devices.append(dev.get('devid'))\r
+        for dev_id in osd_devices:\r
+            o_key = ''\r
+            if device_id and dev_id != device_id:\r
+                continue\r
+            smart_data = self.get_device_health(dev_id)\r
+            if smart_data:\r
+                o_key = sorted(smart_data.keys(), reverse=True)[0]\r
+            if o_key and smart_data and smart_data.values():\r
+                dev_smart = smart_data[o_key]\r
+                if dev_smart:\r
+                    osd_smart[dev_id] = dev_smart\r
+        return osd_smart\r
+\r
+    def get_device_health(self, devid):\r
+        health_data = {}\r
+        try:\r
+            r, outb, outs = self.module.remote('devicehealth', 'show_device_metrics', devid=devid, sample='')\r
+            if r != 0:\r
+                self.module.log.error('failed to get device %s health', devid)\r
+                health_data = {}\r
+            else:\r
+                health_data = json.loads(outb)\r
+        except Exception as e:\r
+            self.module.log.error('failed to get device %s health data due to %s', devid, str(e))\r
+        return health_data\r
+\r
+    def get_osd_hostname(self, osd_id):\r
+        result = ''\r
+        osd_metadata = self.get_osd_metadata(osd_id)\r
+        if osd_metadata:\r
+            osd_host = osd_metadata.get('hostname', '')\r
+            result = osd_host\r
+        return result\r
+\r
+    def get_osd_device_id(self, osd_id):\r
+        result = {}\r
+        if not str(osd_id).isdigit():\r
+            if str(osd_id)[0:4] == 'osd.':\r
+                osdid = osd_id[4:]\r
+            else:\r
+                raise Exception('not a valid <osd.NNN> id or number')\r
+        else:\r
+            osdid = osd_id\r
+        osd_metadata = self.get_osd_metadata(osdid)\r
+        if osd_metadata:\r
+            osd_device_ids = osd_metadata.get('device_ids', '')\r
+            if osd_device_ids:\r
+                result = {}\r
+                for osd_device_id in osd_device_ids.split(','):\r
+                    dev_name = ''\r
+                    if len(str(osd_device_id).split('=')) >= 2:\r
+                        dev_name = osd_device_id.split('=')[0]\r
+                        dev_id = osd_device_id.split('=')[1]\r
+                    else:\r
+                        dev_id = osd_device_id\r
+                    if dev_name:\r
+                        result[dev_name] = {'dev_id': dev_id}\r
+        return result\r
+\r
+    def get_file_systems(self):\r
+        return self.module.get('fs_map').get('filesystems', [])\r
+\r
+    def set_device_life_expectancy(self, device_id, from_date, to_date=None):\r
+        result = CommandResult('')\r
+\r
+        if to_date is None:\r
+            self.module.send_command(result, 'mon', '', json.dumps({\r
+                'prefix': 'device set-life-expectancy',\r
+                'devid': device_id,\r
+                'from': from_date\r
+            }), '')\r
+        else:\r
+            self.module.send_command(result, 'mon', '', json.dumps({\r
+                'prefix': 'device set-life-expectancy',\r
+                'devid': device_id,\r
+                'from': from_date,\r
+                'to': to_date\r
+            }), '')\r
+        ret, outb, outs = result.wait()\r
+        if ret != 0:\r
+            self.module.log.error(\r
+                'failed to set device life expectancy, %s' % outs)\r
+        return ret\r
+\r
+    def reset_device_life_expectancy(self, device_id):\r
+        result = CommandResult('')\r
+        self.module.send_command(result, 'mon', '', json.dumps({\r
+            'prefix': 'device rm-life-expectancy',\r
+            'devid': device_id\r
+        }), '')\r
+        ret, outb, outs = result.wait()\r
+        if ret != 0:\r
+            self.module.log.error(\r
+                'failed to reset device life expectancy, %s' % outs)\r
+        return ret\r
+\r
+    def get_server(self, hostname):\r
+        return self.module.get_server(hostname)\r
+\r
+    def get_configuration(self, key):\r
+        return self.module.get_configuration(key)\r
+\r
+    def get_rate(self, svc_type, svc_name, path):\r
+        """returns most recent rate"""\r
+        data = self.module.get_counter(svc_type, svc_name, path)[path]\r
+\r
+        if data and len(data) > 1:\r
+            return differentiate(*data[-2:])\r
+        return 0.0\r
+\r
+    def get_latest(self, daemon_type, daemon_name, counter):\r
+        return self.module.get_latest(daemon_type, daemon_name, counter)\r
+\r
+    def get_pgs_up_by_poolid(self, poolid):\r
+        pgs = {}\r
+        try:\r
+            osd_map = self.module.get_osdmap()\r
+            if not osd_map:\r
+                return {}\r
+            pgs = osd_map.map_pool_pgs_up(int(poolid))\r
+            return pgs\r
+        except Exception:\r
+            return {}\r
diff --git a/src/pybind/mgr/diskprediction_cloud/common/cypher.py b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
new file mode 100644 (file)
index 0000000..7b7b60e
--- /dev/null
@@ -0,0 +1,71 @@
+from __future__ import absolute_import
+
+import time
+
+
+class NodeInfo(object):
+    """ Neo4j Node information """
+    def __init__(self, label, domain_id, name, meta):
+        self.label = label
+        self.domain_id = domain_id
+        self.name = name
+        self.meta = meta
+
+
+class CypherOP(object):
+    """ Cypher Operation """
+
+    @staticmethod
+    def update(node, key, value, timestamp=None):
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        result = ''
+        if isinstance(node, NodeInfo):
+            if key != 'time':
+                cy_value = '\'%s\'' % value
+            else:
+                cy_value = value
+            result = \
+                'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % (
+                    node.label, key, node.label, timestamp, node.label, key,
+                    cy_value)
+        return result
+
+    @staticmethod
+    def create_or_merge(node, timestamp=None):
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        result = ''
+        if isinstance(node, NodeInfo):
+            meta_list = []
+            if isinstance(node.meta, dict):
+                for key, value in node.meta.items():
+                    meta_list.append(CypherOP.update(node, key, value, timestamp))
+            domain_id = '{domainId:\'%s\'}' % node.domain_id
+            if meta_list:
+                result = 'merge (%s:%s %s) %s %s %s' % (
+                    node.label, node.label,
+                    domain_id,
+                    CypherOP.update(node, 'name', node.name, timestamp),
+                    ' '.join(meta_list),
+                    CypherOP.update(node, 'time', timestamp, timestamp))
+            else:
+                result = 'merge (%s:%s %s) %s %s' % (
+                    node.label, node.label,
+                    domain_id,
+                    CypherOP.update(node, 'name', node.name, timestamp),
+                    CypherOP.update(node, 'time', timestamp, timestamp))
+        return result
+
+    @staticmethod
+    def add_link(snode, dnode, relationship, timestamp=None):
+        result = ''
+        if timestamp is None:
+            timestamp = int(time.time()*(1000**3))
+        if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo):
+            cy_snode = CypherOP.create_or_merge(snode, timestamp)
+            cy_dnode = CypherOP.create_or_merge(dnode, timestamp)
+            target = snode.label + dnode.label
+            link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % (
+                snode.label, target, relationship,
+                dnode.label, target,
+                target, timestamp,
+                target, timestamp)
+            result = '%s %s %s' % (cy_snode, cy_dnode, link)
+        return result
diff --git a/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py
new file mode 100644 (file)
index 0000000..9a9f0a1
--- /dev/null
@@ -0,0 +1,238 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+import grpc
+import json
+from logging import getLogger
+import time
+
+from . import DummyResonse
+from . import client_pb2
+from . import client_pb2_grpc
+
+
+def gen_configuration(**kwargs):
+    configuration = {
+        'host': kwargs.get('host', 'api.diskprophet.com'),
+        'user': kwargs.get('user'),
+        'password': kwargs.get('password'),
+        'port': kwargs.get('port', 31400),
+        'mgr_inst': kwargs.get('mgr_inst', None),
+        'cert_context': kwargs.get('cert_context'),
+        'ssl_target_name': kwargs.get('ssl_target_name', 'api.diskprophet.com'),
+        'default_authority': kwargs.get('default_authority', 'api.diskprophet.com')}
+    return configuration
+
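+# A minimal usage sketch (illustrative values):
+#
+#   conf = gen_configuration(host='api.diskprophet.com',
+#                            user='demo', password='secret')
+#   client = GRPcClient(conf)
+#   if client.test_connection():
+#       client.send_info(points, 'sai_disk')
+#   client.close()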
+
+class GRPcClient:
+
+    def __init__(self, configuration):
+        self.auth = None
+        self.host = configuration.get('host')
+        self.port = configuration.get('port')
+        if configuration.get('user') and configuration.get('password'):
+            self.auth = (
+                ('account', configuration.get('user')),
+                ('password', configuration.get('password')))
+        self.cert_context = configuration.get('cert_context')
+        self.ssl_target_name = configuration.get('ssl_target_name')
+        self.default_authority = configuration.get('default_authority')
+        self.mgr_inst = configuration.get('mgr_inst')
+        if self.mgr_inst:
+            self._logger = self.mgr_inst.log
+        else:
+            self._logger = getLogger()
+        self._client = self._get_channel()
+
+    def close(self):
+        if self._client:
+            self._client.close()
+
+    @staticmethod
+    def connectivity_update(connectivity):
+        pass
+
+    def _get_channel(self):
+        try:
+            creds = grpc.ssl_channel_credentials(
+                root_certificates=self.cert_context)
+            channel = \
+                grpc.secure_channel('{}:{}'.format(
+                    self.host, self.port), creds,
+                    options=(('grpc.ssl_target_name_override', self.ssl_target_name,),
+                             ('grpc.default_authority', self.default_authority),))
+            channel.subscribe(self.connectivity_update, try_to_connect=True)
+            return channel
+        except Exception as e:
+            self._logger.error(
+                'failed to create connection exception: {}'.format(
+                    ';'.join(str(e).split('\n\t'))))
+            return None
+
+    def test_connection(self):
+        try:
+            stub_account = client_pb2_grpc.AccountStub(self._client)
+            result = stub_account.AccountHeartbeat(client_pb2.Empty())
+            if result and "is alive" in str(result.message):
+                return True
+            else:
+                return False
+        except Exception as e:
+            self._logger.error(
+                'failed to test connection exception: {}'.format(
+                    ';'.join(str(e).split('\n\t'))))
+            return False
+
+    def _send_metrics(self, data, measurement):
+        status_info = dict()
+        status_info['measurement'] = None
+        status_info['success_count'] = 0
+        status_info['failure_count'] = 0
+        for dp_data in data:
+            d_measurement = dp_data.measurement
+            if not d_measurement:
+                status_info['measurement'] = measurement
+            else:
+                status_info['measurement'] = d_measurement
+            tag_list = []
+            field_list = []
+            for name in dp_data.tags:
+                tag = '{}={}'.format(name, dp_data.tags[name])
+                tag_list.append(tag)
+            for name in dp_data.fields:
+                if dp_data.fields[name] is None:
+                    continue
+                if isinstance(dp_data.fields[name], str):
+                    field = '{}=\"{}\"'.format(name, dp_data.fields[name])
+                elif isinstance(dp_data.fields[name], bool):
+                    field = '{}={}'.format(name,
+                                           str(dp_data.fields[name]).lower())
+                elif (isinstance(dp_data.fields[name], int) or
+                      isinstance(dp_data.fields[name], long)):
+                    field = '{}={}i'.format(name, dp_data.fields[name])
+                else:
+                    field = '{}={}'.format(name, dp_data.fields[name])
+                field_list.append(field)
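+            # Assemble an InfluxDB-style line-protocol point (illustrative):
+            #   sai_disk,disk_domain_id=abc disk_status=1i 1541598000000000000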
+            point = '{},{} {} {}'.format(
+                status_info['measurement'],
+                ','.join(tag_list),
+                ','.join(field_list),
+                int(time.time() * 1000 * 1000 * 1000))
+            try:
+                resp = self._send_info(data=[point], measurement=status_info['measurement'])
+                status_code = resp.status_code
+                if 200 <= status_code < 300:
+                    self._logger.debug(
+                        '{} send diskprediction api success(ret: {})'.format(
+                            status_info['measurement'], status_code))
+                    status_info['success_count'] += 1
+                else:
+                    self._logger.error(
+                        'return code: {}, content: {}'.format(
+                            status_code, resp.content))
+                    status_info['failure_count'] += 1
+            except Exception as e:
+                status_info['failure_count'] += 1
+                self._logger.error(str(e))
+        return status_info
+
+    def _send_db_relay(self, data, measurement):
+        status_info = dict()
+        status_info['measurement'] = measurement
+        status_info['success_count'] = 0
+        status_info['failure_count'] = 0
+        for dp_data in data:
+            try:
+                resp = self._send_info(
+                    data=[dp_data.fields['cmd']], measurement=measurement)
+                status_code = resp.status_code
+                if 200 <= status_code < 300:
+                    self._logger.debug(
+                        '{} send diskprediction api success(ret: {})'.format(
+                            measurement, status_code))
+                    status_info['success_count'] += 1
+                else:
+                    self._logger.error(
+                        'return code: {}, content: {}'.format(
+                            status_code, resp.content))
+                    status_info['failure_count'] += 1
+            except Exception as e:
+                status_info['failure_count'] += 1
+                self._logger.error(str(e))
+        return status_info
+
+    def send_info(self, data, measurement):
+        """
+        :param data: data structure
+        :param measurement: data measurement class name
+        :return:
+            status_info = {
+                'success_count': <count>,
+                'failure_count': <count>
+            }
+        """
+        if measurement == 'db_relay':
+            return self._send_db_relay(data, measurement)
+        else:
+            return self._send_metrics(data, measurement)
+
+    def _send_info(self, data, measurement):
+        resp = DummyResonse()
+        try:
+            stub_collection = client_pb2_grpc.CollectionStub(self._client)
+            if measurement == 'db_relay':
+                result = stub_collection.PostDBRelay(
+                    client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth)
+            else:
+                result = stub_collection.PostMetrics(
+                    client_pb2.PostMetricsInput(points=data), metadata=self.auth)
+            if result and 'success' in str(result.message).lower():
+                resp.status_code = 200
+                resp.content = ''
+            else:
+                resp.status_code = 400
+                resp.content = ';'.join(str(result).split('\n\t'))
+                self._logger.error(
+                    'failed to send info: {}'.format(resp.content))
+        except Exception as e:
+            resp.status_code = 400
+            resp.content = ';'.join(str(e).split('\n\t'))
+            self._logger.error(
+                'failed to send info exception: {}'.format(resp.content))
+        return resp
+
+    def query_info(self, host_domain_id, disk_domain_id, measurement):
+        resp = DummyResonse()
+        try:
+            stub_dp = client_pb2_grpc.DiskprophetStub(self._client)
+            predicted = stub_dp.DPGetDisksPrediction(
+                client_pb2.DPGetDisksPredictionInput(
+                    physicalDiskIds=disk_domain_id),
+                metadata=self.auth)
+            if predicted and hasattr(predicted, 'data'):
+                resp.status_code = 200
+                resp.content = ''
+                resp_json = json.loads(predicted.data)
+                rc = resp_json.get('results', [])
+                if rc:
+                    series = rc[0].get('series', [])
+                    if series:
+                        values = series[0].get('values', [])
+                        if not values:
+                            resp.resp_json = {}
+                        else:
+                            columns = series[0].get('columns', [])
+                            for item in values:
+                                # get prediction key and value from server.
+                                for name, value in zip(columns, item):
+                                    # process prediction data
+                                    resp.resp_json[name] = value
+                return resp
+            else:
+                resp.status_code = 400
+                resp.content = ''
+                resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))}
+                return resp
+        except Exception as e:
+            resp.status_code = 400
+            resp.content = ';'.join(str(e).split('\n\t'))
+            resp.resp_json = {'error': resp.content}
+            return resp
diff --git a/src/pybind/mgr/diskprediction_cloud/common/server.crt b/src/pybind/mgr/diskprediction_cloud/common/server.crt
new file mode 100644 (file)
index 0000000..d72c9d2
--- /dev/null
@@ -0,0 +1,17 @@
+-----BEGIN CERTIFICATE-----
+MIICsjCCAZoCCQCKLjrHOzCTrDANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBh
+cGkuZmVkZXJhdG9yLmFpMB4XDTE4MDgwMjA2NDg0N1oXDTI4MDczMDA2NDg0N1ow
+GzEZMBcGA1UEAwwQYXBpLmZlZGVyYXRvci5haTCCASIwDQYJKoZIhvcNAQEBBQAD
+ggEPADCCAQoCggEBAJkDL/VoLbI+Rc1GXkZwpN8n4e7HhIk1iK98yhXegoH8g6ZZ
+uVVlUW/zNO0V5W9IgiSBqEWOEf9heWj7mIbbxl437W1LpR4V0LKR2dbY7ZMwlB3L
+ZJYxtziZYu1g4Fn9hDnVJIXVmXFpF62wHd2ZSY7FyUF/OGetxLSfoOMkTHY8A8HB
+92vQfoFjgx1e23lLgTO2VpucmU/qXiF+xI/K6kkrMnGJi4xBL29i3aKRRNktVUHf
+Zs6JhBKl4sbvkW5m5AECW4c0XxVJotTLoPUjx4rxp0k5S1aQSYSS+0z96eVY0w8J
+ungiWEj7lLqwEGKjOzfjDLsczZIcZZcQSQwb3qcCAwEAATANBgkqhkiG9w0BAQsF
+AAOCAQEADwfBrHsvPmUD8CTx8lpVcqrOlHc7ftW3hb11vWwwfJw4fBiJ8DoB496x
+SAP2CJyDnSLdyvVueKLjiRFBm96W76nbMeP9+CkktGRUbLjkByv/v+7WSxRrukDC
+yR6IXqQJe4ADcYkVYoUMx3frBQzFtS7hni0FPvl3AN55TvTXqed61CdN9zdw9Ezn
+yn0oy3BbT5h/zNHefTQBzgQhW62C5YdTRtS6VVWV/k1kLz0GVG1eMtAqueUCxFeM
+g1mXYz2/Cm5C8pszZfiP+a/QV1z/3QgRUp0i0yVLiteqNDCPv6bc767VQEuXok9p
+NDuKElVxdA0WD9cbnBXiyfeMOQnjQw==
+-----END CERTIFICATE-----
diff --git a/src/pybind/mgr/diskprediction_cloud/module.py b/src/pybind/mgr/diskprediction_cloud/module.py
new file mode 100644 (file)
index 0000000..204184b
--- /dev/null
@@ -0,0 +1,437 @@
+"""
+diskprediction with cloud predictor
+"""
+from __future__ import absolute_import
+
+from datetime import datetime
+import errno
+import json
+from mgr_module import MgrModule
+import os
+from threading import Event
+
+from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED
+from .task import MetricsRunner, SmartRunner, TestRunner
+
+TIME_DAYS = 24*60*60
+TIME_WEEK = TIME_DAYS * 7
+DP_AGENTS = [MetricsRunner, SmartRunner]
+
+
+class Module(MgrModule):
+
+    OPTIONS = [
+        {
+            'name': 'diskprediction_server',
+            'default': ''
+        },
+        {
+            'name': 'diskprediction_port',
+            'default': '31400'
+        },
+        {
+            'name': 'diskprediction_user',
+            'default': ''
+        },
+        {
+            'name': 'diskprediction_password',
+            'default': ''
+        },
+        {
+            'name': 'diskprediction_upload_metrics_interval',
+            'default': '600'
+        },
+        {
+            'name': 'diskprediction_upload_smart_interval',
+            'default': '43200'
+        },
+        {
+            'name': 'diskprediction_retrieve_prediction_interval',
+            'default': '43200'
+        },
+        {
+            'name': 'diskprediction_cert_context',
+            'default': ''
+        },
+        {
+            'name': 'diskprediction_ssl_target_name_override',
+            'default': 'api.diskprophet.com'
+        },
+        {
+            'name': 'diskprediction_default_authority',
+            'default': 'api.diskprophet.com'
+        },
+        {
+            'name': 'sleep_interval',
+            'default': str(600),
+        },
+        {
+            'name': 'predict_interval',
+            'default': str(86400),
+        }
+    ]
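+    # The connection settings above are normally filled in through the
+    # 'device set-cloud-prediction-config' command declared below rather
+    # than set one at a time.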
+
+    COMMANDS = [
+        {
+            'cmd': 'device show-prediction-config',
+            'desc': 'Prints diskprediction configuration',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'device set-cloud-prediction-config '
+                   'name=server,type=CephString,req=true '
+                   'name=user,type=CephString,req=true '
+                   'name=password,type=CephString,req=true '
+                   'name=certfile,type=CephString,req=true '
+                   'name=port,type=CephString,req=false ',
+            'desc': 'Configure Disk Prediction service',
+            'perm': 'rw'
+        },
+        {
+            'cmd': 'device debug metrics-forced',
+            'desc': 'Force-run the metrics agent',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'device debug smart-forced',
+            'desc': 'Force-run the smart agent',
+            'perm': 'r'
+        },
+        {
+            'cmd': 'diskprediction_cloud status',
+            'desc': 'Check diskprediction_cloud status',
+            'perm': 'r'
+        }
+    ]
+
+    def __init__(self, *args, **kwargs):
+        super(Module, self).__init__(*args, **kwargs)
+        self.status = {'status': DP_MGR_STAT_DISABLED}
+        self.shutdown_event = Event()
+        self._agents = []
+        self._activated_cloud = False
+        self._prediction_result = {}
+        self.config = dict()
+
+    @property
+    def config_keys(self):
+        return dict((o['name'], o.get('default', None)) for o in self.OPTIONS)
+
+    def set_config_option(self, option, value):
+        if option not in self.config_keys.keys():
+            raise RuntimeError('{0} is an unknown configuration '
+                               'option'.format(option))
+
+        if option in ['diskprediction_port',
+                      'diskprediction_upload_metrics_interval',
+                      'diskprediction_upload_smart_interval',
+                      'diskprediction_retrieve_prediction_interval']:
+            if not str(value).isdigit():
+                raise RuntimeError('invalid value for {}: {}; please '
+                                   'specify a valid integer'.format(option, value))
+
+        self.log.debug('Setting in-memory config option %s to: %s', option,
+                       value)
+        self.set_config(option, value)
+        self.config[option] = value
+
+        return True
+
+    def get_configuration(self, key):
+        return self.get_config(key, self.config_keys[key])
+
+    @staticmethod
+    def _convert_timestamp(predicted_timestamp, life_expectancy_day):
+        """
+        :param predicted_timestamp: unit is nanoseconds
+        :param life_expectancy_day: unit is seconds
+        :return:
+            date format '%Y-%m-%d' ex. 2018-01-01
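+        example (illustrative, assuming a UTC local clock):
+            _convert_timestamp(1514764800 * 1000 ** 3, 86400) == '2018-01-02'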
+        """
+        return datetime.fromtimestamp(
+            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')
+
+    def _show_prediction_config(self, cmd):
+        self.show_module_config()
+        return 0, json.dumps(self.config, indent=4), ''
+
+    def _set_ssl_target_name(self, cmd):
+        str_ssl_target = cmd.get('ssl_target_name', '')
+        try:
+            self.set_config('diskprediction_ssl_target_name_override', str_ssl_target)
+            return 0, 'successfully configured ssl target name', ''
+        except Exception as e:
+            return -errno.EINVAL, '', str(e)
+
+    def _set_ssl_default_authority(self, cmd):
+        str_ssl_authority = cmd.get('ssl_authority', '')
+        try:
+            self.set_config('diskprediction_default_authority', str_ssl_authority)
+            return 0, 'successfully configured ssl default authority', ''
+        except Exception as e:
+            return -errno.EINVAL, '', str(e)
+
+    def _set_cloud_prediction_config(self, cmd):
+        str_cert_path = cmd.get('certfile', '')
+        if os.path.exists(str_cert_path):
+            with open(str_cert_path, 'rb') as f:
+                trusted_certs = f.read()
+            self.set_config_option(
+                'diskprediction_cert_context', trusted_certs)
+            for _agent in self._agents:
+                _agent.event.set()
+            self.set_config('diskprediction_server', cmd['server'])
+            self.set_config('diskprediction_user', cmd['user'])
+            self.set_config('diskprediction_password', cmd['password'])
+            if cmd.get('port'):
+                self.set_config('diskprediction_port', cmd['port'])
+            return 0, 'successfully configured cloud mode connection', ''
+        else:
+            return -errno.EINVAL, '', 'certificate file does not exist'
+
+    def _debug_metrics_forced(self, cmd):
+        msg = ''
+        for _agent in self._agents:
+            if isinstance(_agent, MetricsRunner):
+                msg = 'run metrics agent successfully'
+                _agent.event.set()
+        return 0, msg, ''
+
+    def _debug_smart_forced(self, cmd):
+        msg = ''
+        for _agent in self._agents:
+            if isinstance(_agent, SmartRunner):
+                msg = 'run smart agent successfully'
+                _agent.event.set()
+        return 0, msg, ''
+
+    def refresh_config(self):
+        for opt in self.OPTIONS:
+            setattr(self,
+                    opt['name'],
+                    self.get_config(opt['name']) or opt['default'])
+            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
+
+    def _status(self, cmd):
+        return 0, json.dumps(self.status), ''
+
+    def _get_cloud_prediction_result(self, host_domain_id, disk_domain_id):
+        result = {}
+        obj_sender = None
+        from .common.grpcclient import GRPcClient, gen_configuration
+        conf = gen_configuration(
+            host=self.get_configuration('diskprediction_server'),
+            user=self.get_configuration('diskprediction_user'),
+            password=self.get_configuration('diskprediction_password'),
+            port=self.get_configuration('diskprediction_port'),
+            cert_context=self.get_configuration('diskprediction_cert_context'),
+            mgr_inst=self,
+            ssl_target_name=self.get_configuration('diskprediction_ssl_target_name_override'),
+            default_authority=self.get_configuration('diskprediction_default_authority'))
+        try:
+            obj_sender = GRPcClient(conf)
+            if obj_sender:
+                query_info = obj_sender.query_info(host_domain_id, disk_domain_id, 'sai_disk_prediction')
+                status_code = query_info.status_code
+                if status_code == 200:
+                    result = query_info.json()
+                else:
+                    resp = query_info.json()
+                    if resp.get('error'):
+                        self.log.error(str(resp['error']))
+        finally:
+            if obj_sender:
+                obj_sender.close()
+        return result
+
+    def predict_life_expectancy(self, devid):
+        assert devid
+        self.refresh_config()
+        prediction_data = {}
+        result = self.get('device {}'.format(devid))
+        if not result:
+            return -1, '', 'device {} not found'.format(devid)
+        dev_info = result.get('device', {})
+        if not dev_info:
+            return -1, '', 'device {} not found'.format(devid)
+        cluster_id = self.get('mon_map').get('fsid', '')
+        for location in dev_info.get('location', []):
+            host = location.get('host')
+            host_domain_id = '{}_{}'.format(cluster_id, host)
+            prediction_data = self._get_cloud_prediction_result(host_domain_id, devid)
+            if prediction_data:
+                break
+        if not prediction_data:
+            return -1, '', 'device {} prediction data not ready'.format(devid)
+        elif prediction_data.get('near_failure', '').lower() == 'good':
+            return 0, '>6w', ''
+        elif prediction_data.get('near_failure', '').lower() == 'warning':
+            return 0, '>=2w and <=6w', ''
+        elif prediction_data.get('near_failure', '').lower() == 'bad':
+            return 0, '<2w', ''
+        else:
+            return 0, 'unknown', ''
+
+    def _update_device_life_expectancy_day(self, devid, prediction):
+        # Update osd life-expectancy
+        from .common.clusterdata import ClusterAPI
+        predicted = None
+        life_expectancy_day_min = None
+        life_expectancy_day_max = None
+        if prediction.get('predicted'):
+            predicted = int(prediction['predicted'])
+        if prediction.get('near_failure'):
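+            # Map the predictor's coarse near-failure state onto a
+            # life-expectancy window: good -> more than six weeks,
+            # warning -> two to six weeks, bad -> under two weeks.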
+            if prediction['near_failure'].lower() == 'good':
+                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
+                life_expectancy_day_max = None
+            elif prediction['near_failure'].lower() == 'warning':
+                life_expectancy_day_min = (TIME_WEEK * 2)
+                life_expectancy_day_max = (TIME_WEEK * 6)
+            elif prediction['near_failure'].lower() == 'bad':
+                life_expectancy_day_min = 0
+                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
+            else:
+                # Near failure state is unknown.
+                predicted = None
+                life_expectancy_day_min = None
+                life_expectancy_day_max = None
+
+        obj_api = ClusterAPI(self)
+        if predicted and devid and life_expectancy_day_min is not None:
+            from_date = None
+            to_date = None
+            try:
+                if life_expectancy_day_min is not None:
+                    from_date = self._convert_timestamp(predicted, life_expectancy_day_min)
+
+                if life_expectancy_day_max is not None:
+                    to_date = self._convert_timestamp(predicted, life_expectancy_day_max)
+
+                obj_api.set_device_life_expectancy(devid, from_date, to_date)
+                self.log.info(
+                    'successfully set device {} life expectancy from: {}, to: {}'.format(
+                        devid, from_date, to_date))
+            except Exception as e:
+                self.log.error(
+                    'failed to set device {} life expectancy from: {}, to: {}, {}'.format(
+                        devid, from_date, to_date, str(e)))
+        else:
+            obj_api.reset_device_life_expectancy(devid)
+
+    def predict_all_devices(self):
+        if not self._activated_cloud:
+            return -1, '', 'diskprediction_cloud not ready'
+        prediction_data = {}
+        self.refresh_config()
+        result = self.get('devices')
+        cluster_id = self.get('mon_map').get('fsid')
+        if not result:
+            return -1, '', 'unable to get all devices for prediction'
+        for dev in result.get('devices', []):
+            for location in dev.get('location', []):
+                host = location.get('host')
+                host_domain_id = '{}_{}'.format(cluster_id, host)
+                prediction_data = self._get_cloud_prediction_result(host_domain_id, dev.get('devid'))
+                if prediction_data:
+                    break
+            if not prediction_data:
+                return -1, '', 'device {} prediction data not ready'.format(dev.get('devid'))
+            else:
+                self._update_device_life_expectancy_day(dev.get('devid'), prediction_data)
+        return 0, '', ''
+
+    def handle_command(self, _, cmd):
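+        # Dispatch by rebuilding a handler name from the command prefix, e.g.
+        # 'device show-prediction-config' -> self._show_prediction_config.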
+        for o_cmd in self.COMMANDS:
+            if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]:
+                fun_name = ''
+                args = o_cmd['cmd'].split(' ')
+                for arg in args:
+                    if arg.lower() == 'diskprediction_cloud':
+                        continue
+                    if arg.lower() == 'device':
+                        continue
+                    if '=' in arg or ',' in arg or not arg:
+                        continue
+                    fun_name += '_%s' % arg.replace('-', '_')
+                if fun_name:
+                    fun = getattr(self, fun_name)
+                    if fun:
+                        return fun(cmd)
+        return -errno.EINVAL, '', 'cmd not found'
+
+    def show_module_config(self):
+        for key, default in self.config_keys.items():
+            self.set_config_option(key, self.get_config(key, default))
+
+    def serve(self):
+        self.log.info('Starting diskprediction module')
+        self.status = {'status': DP_MGR_STAT_ENABLED}
+
+        while True:
+            self.refresh_config()
+            mode = self.get_option('device_failure_prediction_mode')
+            if mode == 'cloud':
+                if not self._activated_cloud:
+                    self.start_cloud_disk_prediction()
+            else:
+                if self._activated_cloud:
+                    self.stop_disk_prediction()
+
+            # Check whether any agent has hung.
+            restart_agent = False
+            try:
+                for dp_agent in self._agents:
+                    if dp_agent.is_timeout():
+                        self.log.error('agent name: {} timeout'.format(dp_agent.task_name))
+                        restart_agent = True
+                        break
+            except Exception:
+                self.log.error('disk prediction plugin failed to check agents; restarting them')
+                restart_agent = True
+
+            if restart_agent:
+                self.stop_disk_prediction()
+            else:
+                sleep_interval = int(self.sleep_interval) or 60
+                self.shutdown_event.wait(sleep_interval)
+                if self.shutdown_event.is_set():
+                    break
+        self.stop_disk_prediction()
+
+    def start_cloud_disk_prediction(self):
+        assert not self._activated_cloud
+        for dp_agent in DP_AGENTS:
+            obj_agent = dp_agent(self, 300)
+            if obj_agent:
+                obj_agent.start()
+            else:
+                raise Exception('failed to start task %s' % obj_agent.task_name)
+            self._agents.append(obj_agent)
+        self._activated_cloud = True
+        self.log.info('start cloud disk prediction')
+
+    def stop_disk_prediction(self):
+        assert self._activated_cloud
+        try:
+            self.status = {'status': DP_MGR_STAT_DISABLED}
+            while self._agents:
+                dp_agent = self._agents.pop()
+                self.log.info('agent name: {}'.format(dp_agent.task_name))
+                dp_agent.terminate()
+                dp_agent.join(5)
+                del dp_agent
+            self._activated_cloud = False
+            self.log.info('stop disk prediction')
+        except Exception:
+            self.log.error('failed to stop disk prediction cloud plugin')
+
+    def shutdown(self):
+        self.shutdown_event.set()
+        super(Module, self).shutdown()
+
+    def self_test(self):
+        obj_test = TestRunner(self)
+        obj_test.run()
+        self.log.info('self test completed')
diff --git a/src/pybind/mgr/diskprediction_cloud/requirements.txt b/src/pybind/mgr/diskprediction_cloud/requirements.txt
new file mode 100644 (file)
index 0000000..3a22a07
--- /dev/null
@@ -0,0 +1,14 @@
+google==2.0.1
+google-api-python-client==1.7.3
+google-auth==1.5.0
+google-auth-httplib2==0.0.3
+google-gax==0.12.5
+googleapis-common-protos==1.5.3
+grpc==0.3.post19
+grpc-google-logging-v2==0.8.1
+grpc-google-pubsub-v1==0.8.1
+grpcio==1.14.1
+mock==2.0.0
+numpy==1.15.1
+scikit-learn==0.19.2
+scipy==1.1.0
diff --git a/src/pybind/mgr/diskprediction_cloud/task.py b/src/pybind/mgr/diskprediction_cloud/task.py
new file mode 100644 (file)
index 0000000..d03450e
--- /dev/null
@@ -0,0 +1,171 @@
+from __future__ import absolute_import\r
+\r
+import time\r
+from threading import Event, Thread\r
+\r
+from .agent.metrics.ceph_cluster import CephClusterAgent\r
+from .agent.metrics.ceph_mon_osd import CephMonOsdAgent\r
+from .agent.metrics.ceph_pool import CephPoolAgent\r
+from .agent.metrics.db_relay import DBRelayAgent\r
+from .agent.metrics.sai_agent import SAIAgent\r
+from .agent.metrics.sai_cluster import SAICluserAgent\r
+from .agent.metrics.sai_disk import SAIDiskAgent\r
+from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent\r
+from .agent.metrics.sai_host import SAIHostAgent\r
+from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING\r
+\r
+\r
+class AgentRunner(Thread):\r
+\r
+    task_name = ''\r
+    interval_key = ''\r
+    agents = []\r
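+    # Subclasses set task_name, interval_key (the module option holding the\r
+    # run interval in seconds) and the agent classes to run each cycle.\r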
+\r
+    def __init__(self, mgr_module, agent_timeout=60):\r
+        """\r
+\r
+        :param mgr_module: parent ceph mgr module\r
+        :param agent_timeout: (unit seconds) agent execute timeout value, default: 60 secs\r
+        """\r
+        Thread.__init__(self)\r
+        self._agent_timeout = agent_timeout\r
+        self._module_inst = mgr_module\r
+        self._log = mgr_module.log\r
+        self._start_time = time.time()\r
+        self._th = None\r
+\r
+        self.exit = False\r
+        self.event = Event()\r
+        self.task_interval = \\r
+            int(self._module_inst.get_configuration(self.interval_key))\r
+\r
+    def terminate(self):\r
+        self.exit = True\r
+        self.event.set()\r
+        self._log.info('PDS terminate %s complete' % self.task_name)\r
+\r
+    def run(self):\r
+        self._start_time = time.time()\r
+        self._log.info(\r
+            'start %s, interval: %s'\r
+            % (self.task_name, self.task_interval))\r
+        while not self.exit:\r
+            self.run_agents()\r
+            if self.event:\r
+                self.event.wait(int(self.task_interval))\r
+                self.event.clear()\r
+                self._log.info(\r
+                    'completed %s(%s)' % (self.task_name, time.time()-self._start_time))\r
+\r
+    def run_agents(self):\r
+        obj_sender = None\r
+        try:\r
+            self._log.debug('run_agents %s' % self.task_name)\r
+            from .common.grpcclient import GRPcClient, gen_configuration\r
+            conf = gen_configuration(\r
+                host=self._module_inst.get_configuration('diskprediction_server'),\r
+                user=self._module_inst.get_configuration('diskprediction_user'),\r
+                password=self._module_inst.get_configuration(\r
+                    'diskprediction_password'),\r
+                port=self._module_inst.get_configuration('diskprediction_port'),\r
+                cert_context=self._module_inst.get_configuration('diskprediction_cert_context'),\r
+                mgr_inst=self._module_inst,\r
+                ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'),\r
+                default_authority=self._module_inst.get_configuration('diskprediction_default_authority'))\r
+            obj_sender = GRPcClient(conf)\r
+            if not obj_sender:\r
+                self._log.error('invalid diskprediction sender')\r
+                self._module_inst.status = \\r
+                    {'status': DP_MGR_STAT_FAILED,\r
+                     'reason': 'invalid diskprediction sender'}\r
+                raise Exception('invalid diskprediction sender')\r
+            if obj_sender.test_connection():\r
+                self._module_inst.status = {'status': DP_MGR_STAT_OK}\r
+                self._log.debug('connection test succeeded')\r
+                self._run(self._module_inst, obj_sender)\r
+            else:\r
+                self._log.error('failed to test connection')\r
+                self._module_inst.status = \\r
+                    {'status': DP_MGR_STAT_FAILED,\r
+                     'reason': 'failed to test connection'}\r
+                raise Exception('failed to test connection')\r
+        except Exception as e:\r
+            self._module_inst.status = \\r
+                {'status': DP_MGR_STAT_FAILED,\r
+                 'reason': 'failed to start %s agents, %s'\r
+                           % (self.task_name, str(e))}\r
+            self._log.error(\r
+                'failed to start %s agents, %s' % (self.task_name, str(e)))\r
+            raise\r
+        finally:\r
+            if obj_sender:\r
+                obj_sender.close()\r
+\r
+    def is_timeout(self):\r
+        now = time.time()\r
+        if (now - self._start_time) > self._agent_timeout:\r
+            return True\r
+        else:\r
+            return False\r
+\r
+    def _run(self, module_inst, sender):\r
+        self._log.debug('%s run' % self.task_name)\r
+        for agent in self.agents:\r
+            self._start_time = time.time()\r
+            retry_count = 3\r
+            while retry_count:\r
+                retry_count -= 1\r
+                try:\r
+                    obj_agent = agent(module_inst, sender, self._agent_timeout)\r
+                    obj_agent.run()\r
+                    del obj_agent\r
+                    break\r
+                except Exception as e:\r
+                    if str(e).find('configuring') >= 0:\r
+                        self._log.debug(\r
+                            'failed to execute {}, {}, retrying.'.format(\r
+                                agent.measurement, str(e)))\r
+                        time.sleep(1)\r
+                        continue\r
+                    else:\r
+                        module_inst.status = \\r
+                            {'status': DP_MGR_STAT_WARNING,\r
+                             'reason': 'failed to execute {}, {}'.format(\r
+                                agent.measurement, ';'.join(str(e).split('\n\t')))}\r
+                        self._log.warning(\r
+                            'failed to execute {}, {}'.format(\r
+                                agent.measurement, ';'.join(str(e).split('\n\t'))))\r
+                        break\r
+\r
+\r
+class MetricsRunner(AgentRunner):\r
+\r
+    task_name = 'Metrics Agent'\r
+    interval_key = 'diskprediction_upload_metrics_interval'\r
+    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,\r
+              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,\r
+              SAIAgent]\r
+\r
+\r
+class SmartRunner(AgentRunner):\r
+\r
+    task_name = 'Smart data Agent'\r
+    interval_key = 'diskprediction_upload_smart_interval'\r
+    agents = [SAIDiskSmartAgent]\r
+\r
+\r
+class TestRunner(object):\r
+    task_name = 'Test Agent'\r
+    interval_key = 'diskprediction_upload_metrics_interval'\r
+    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,\r
+              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,\r
+              SAIAgent, SAIDiskSmartAgent]\r
+\r
+    def __init__(self, mgr_module):\r
+        self._module_inst = mgr_module\r
+\r
+    def run(self):\r
+        for agent in self.agents:\r
+            obj_agent = agent(self._module_inst, None)\r
+            obj_agent.run()\r
+            del obj_agent\r