git.apps.os.sepia.ceph.com Git - ceph-ci.git/commitdiff
pybind/mgr/diskprediction_cloud: remove mgr-diskprediction-cloud
author    Kefu Chai <kchai@redhat.com>
          Mon, 17 Aug 2020 07:35:25 +0000 (15:35 +0800)
committer Kefu Chai <kchai@redhat.com>
          Wed, 19 Aug 2020 03:08:38 +0000 (11:08 +0800)
The service offered by https://www.diskprophet.com is not accessible
anymore. Until we have a replacement for it, drop
mgr-diskprediction-cloud and remove its packaging.

Signed-off-by: Kefu Chai <kchai@redhat.com>
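
Operators who had the cloud predictor enabled can switch to the still-supported
local mode after upgrading. A minimal sketch (assuming the ceph CLI is on PATH
and a cluster is reachable; the JSON keys follow the output of
"ceph mgr module ls"):

    import json
    import subprocess

    # list mgr modules as JSON and check whether the removed module
    # is still marked enabled in the mon config
    out = subprocess.check_output(
        ['ceph', '--format', 'json', 'mgr', 'module', 'ls'])
    modules = json.loads(out)
    if 'diskprediction_cloud' in modules.get('enabled_modules', []):
        print('diskprediction_cloud is still enabled but no longer exists; '
              'disable it and use the local predictor instead')

    # enable the local predictor and point failure prediction at it
    subprocess.check_call(
        ['ceph', 'mgr', 'module', 'enable', 'diskprediction_local'])
    subprocess.check_call(
        ['ceph', 'config', 'set', 'global',
         'device_failure_prediction_mode', 'local'])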
28 files changed:
PendingReleaseNotes
ceph.spec.in
debian/ceph-mgr-diskprediction-cloud.install [deleted file]
debian/ceph-mgr-diskprediction-cloud.postinst [deleted file]
debian/control
src/pybind/mgr/diskprediction_cloud/__init__.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/__init__.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py [deleted file]
src/pybind/mgr/diskprediction_cloud/agent/predictor.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/__init__.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/client_pb2.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/clusterdata.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/cypher.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/grpcclient.py [deleted file]
src/pybind/mgr/diskprediction_cloud/common/server.crt [deleted file]
src/pybind/mgr/diskprediction_cloud/module.py [deleted file]
src/pybind/mgr/diskprediction_cloud/requirements.txt [deleted file]
src/pybind/mgr/diskprediction_cloud/task.py [deleted file]

index 92f83d1316e36bba432c4c426387bc5919f619e5..1333b823321c57803688e625ad77a2a42e55bd7d 100644 (file)
   the balancer was included in the ``always_on_modules`` list, but needed to be
   turned on explicitly using the ``ceph balancer on`` command.
 
+* MGR: the "cloud" mode of the diskprediction module is no longer supported,
+  and the ``ceph-mgr-diskprediction-cloud`` package has been removed. This is
+  because the external cloud service run by ProphetStor is no longer
+  accessible and there is no immediate replacement for it. The "local"
+  prediction mode will continue to be supported.
+
 * Cephadm: There were a lot of small usability improvements and bug fixes:
 
   * Grafana when deployed by Cephadm now binds to all network interfaces.
index 2f1dbc79ccaa6231ba47fa9b62084d983a04e175..691e145f5794f346578e5f0dcaad33a2c5b8bd6a 100644 (file)
@@ -458,7 +458,6 @@ Requires:       ceph-mgr-modules-core = %{_epoch_prefix}%{version}-%{release}
 %if 0%{?weak_deps}
 Recommends:    ceph-mgr-dashboard = %{_epoch_prefix}%{version}-%{release}
 Recommends:    ceph-mgr-diskprediction-local = %{_epoch_prefix}%{version}-%{release}
-Recommends:    ceph-mgr-diskprediction-cloud = %{_epoch_prefix}%{version}-%{release}
 Recommends:    ceph-mgr-k8sevents = %{_epoch_prefix}%{version}-%{release}
 Recommends:    ceph-mgr-cephadm = %{_epoch_prefix}%{version}-%{release}
 Recommends:    python%{python3_pkgversion}-influxdb
@@ -516,24 +515,6 @@ Requires:       python3-scipy
 ceph-mgr-diskprediction-local is a ceph-mgr module that tries to predict
 disk failures using local algorithms and machine-learning databases.
 
-%package mgr-diskprediction-cloud
-Summary:        Ceph Manager module for cloud-based disk failure prediction
-BuildArch:      noarch
-%if 0%{?suse_version}
-Group:          System/Filesystems
-%endif
-Requires:       ceph-mgr = %{_epoch_prefix}%{version}-%{release}
-%if 0%{without python2}
-Requires:       python3-grpcio
-Requires:       python3-protobuf
-%else
-Requires:       python2-grpcio
-Requires:       python2-protobuf
-%endif
-%description mgr-diskprediction-cloud
-ceph-mgr-diskprediction-cloud is a ceph-mgr module that tries to predict
-disk failures using services in the Google cloud.
-
 %package mgr-modules-core
 Summary:        Ceph Manager modules which are always enabled
 BuildArch:      noarch
@@ -1647,19 +1628,6 @@ if [ $1 -eq 1 ] ; then
     /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
 fi
 
-%files mgr-diskprediction-cloud
-%{_datadir}/ceph/mgr/diskprediction_cloud
-
-%post mgr-diskprediction-cloud
-if [ $1 -eq 1 ] ; then
-    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
-fi
-
-%postun mgr-diskprediction-cloud
-if [ $1 -eq 1 ] ; then
-    /usr/bin/systemctl try-restart ceph-mgr.target >/dev/null 2>&1 || :
-fi
-
 %files mgr-modules-core
 %dir %{_datadir}/ceph/mgr
 %{_datadir}/ceph/mgr/alerts
diff --git a/debian/ceph-mgr-diskprediction-cloud.install b/debian/ceph-mgr-diskprediction-cloud.install
deleted file mode 100644 (file)
index 58481b2..0000000
+++ /dev/null
@@ -1 +0,0 @@
-usr/share/ceph/mgr/diskprediction_cloud
diff --git a/debian/ceph-mgr-diskprediction-cloud.postinst b/debian/ceph-mgr-diskprediction-cloud.postinst
deleted file mode 100644 (file)
index d8e7a50..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-# vim: set noet ts=8:
-# postinst script for ceph-mgr-diskprediction-cloud
-#
-# see: dh_installdeb(1)
-
-set -e
-
-# summary of how this script can be called:
-#
-#      postinst configure <most-recently-configured-version>
-#      old-postinst abort-upgrade <new-version>
-#      conflictor's-postinst abort-remove in-favour <package> <new-version>
-#      postinst abort-remove
-#      deconfigured's-postinst abort-deconfigure in-favour <failed-install-package> <version> [<removing conflicting-package> <version>]
-#
-
-# for details, see http://www.debian.org/doc/debian-policy/ or
-# the debian-policy package
-
-case "$1" in
-    configure)
-       # attempt to load the plugin if the mgr is running
-       deb-systemd-invoke try-restart ceph-mgr.target
-    ;;
-    abort-upgrade|abort-remove|abort-deconfigure)
-       :
-    ;;
-
-    *)
-        echo "postinst called with unknown argument \`$1'" >&2
-        exit 1
-    ;;
-esac
-
-# dh_installdeb will replace this with shell code automatically
-# generated by other debhelper scripts.
-
-#DEBHELPER#
-
-exit 0
-
-
index d83f8425b8e2bc1636616b0eb2855619141c73c2..c4425a3aadb69729b4e3eadbb2324b4e7b7640e8 100644 (file)
@@ -225,7 +225,6 @@ Depends: ceph-base (= ${binary:Version}),
          ${shlibs:Depends},
 Recommends: ceph-mgr-dashboard,
             ceph-mgr-diskprediction-local,
-            ceph-mgr-diskprediction-cloud,
             ceph-mgr-k8sevents,
             ceph-mgr-cephadm
 Suggests: python3-influxdb
@@ -279,20 +278,6 @@ Description: diskprediction-local module for ceph-mgr
  This package contains the diskprediction_local module for the ceph-mgr
  daemon, which helps predict disk failures.
 
-Package: ceph-mgr-diskprediction-cloud
-Architecture: all
-Depends: ceph-mgr (= ${binary:Version}),
-         ${misc:Depends},
-         ${python:Depends},
-         ${shlibs:Depends},
-Description: diskprediction-cloud module for ceph-mgr
- Ceph is a massively scalable, open-source, distributed
- storage system that runs on commodity hardware and delivers object,
- block and file system storage.
- .
- This package contains the diskprediction_cloud module for the ceph-mgr
- daemon, which helps predict disk failures.
-
 Package: ceph-mgr-modules-core
 Architecture: all
 Depends: ${misc:Depends},
diff --git a/src/pybind/mgr/diskprediction_cloud/__init__.py b/src/pybind/mgr/diskprediction_cloud/__init__.py
deleted file mode 100644 (file)
index 8f210ac..0000000
+++ /dev/null
@@ -1 +0,0 @@
-from .module import Module
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/__init__.py
deleted file mode 100644 (file)
index e7e0ddc..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-from __future__ import absolute_import\r
-\r
-from ..common import timeout, TimeoutError\r
-\r
-\r
-class BaseAgent(object):\r
-\r
-    measurement = ''\r
-\r
-    def __init__(self, mgr_module, obj_sender, timeout=30):\r
-        self.data = []\r
-        self._client = None\r
-        self._client = obj_sender\r
-        self._logger = mgr_module.log\r
-        self._module_inst = mgr_module\r
-        self._timeout = timeout\r
-\r
-    def run(self):\r
-        try:\r
-            self._collect_data()\r
-            self._run()\r
-        except TimeoutError:\r
-            self._logger.error('{} failed to execute {} task'.format(\r
-                __name__, self.measurement))\r
-\r
-    def __nonzero__(self):\r
-        if not self._module_inst:\r
-            return False\r
-        else:\r
-            return True\r
-\r
-    @timeout\r
-    def _run(self):\r
-        pass\r
-\r
-    @timeout\r
-    def _collect_data(self):\r
-        pass\r
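
The deleted metrics agents all specialized this template: run() drives
_collect_data() and then _run(), with both hooks wrapped by the module's
timeout decorator. A minimal self-contained sketch of that contract, with a
no-op stand-in for the removed decorator and a plain logger instead of a mgr
module (all names below are illustrative):

    import functools
    import logging

    def timeout(func):
        # stand-in for diskprediction_cloud.common.timeout, which raised
        # TimeoutError when the wrapped call exceeded its deadline
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    class ExampleAgent:
        measurement = 'example'

        def __init__(self, sender):
            self.data = []
            self._client = sender
            self._logger = logging.getLogger(__name__)

        def run(self):
            try:
                self._collect_data()
                self._run()
            except TimeoutError:
                self._logger.error('%s failed to execute %s task',
                                   __name__, self.measurement)

        @timeout
        def _collect_data(self):
            # subclasses gathered metrics here and appended them to self.data
            self.data.append({'measurement': self.measurement})

        @timeout
        def _run(self):
            # subclasses typically handed self.data to the sender here
            if self.data and self._client:
                self._client.send_info(self.data, self.measurement)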
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/__init__.py
deleted file mode 100644 (file)
index 9e7e5b0..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-from __future__ import absolute_import\r
-\r
-from .. import BaseAgent\r
-from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING, DP_MGR_STAT_OK\r
-\r
-AGENT_VERSION = '1.0.0'\r
-\r
-\r
-class MetricsField(object):\r
-    def __init__(self):\r
-        self.tags = {}\r
-        self.fields = {}\r
-        self.timestamp = None\r
-\r
-    def __str__(self):\r
-        return str({\r
-            'tags': self.tags,\r
-            'fields': self.fields,\r
-            'timestamp': self.timestamp\r
-        })\r
-\r
-\r
-class MetricsAgent(BaseAgent):\r
-\r
-    def log_summary(self, status_info):\r
-        try:\r
-            if status_info:\r
-                measurement = status_info['measurement']\r
-                success_count = status_info['success_count']\r
-                failure_count = status_info['failure_count']\r
-                total_count = success_count + failure_count\r
-                display_string = \\r
-                    '%s agent stats in total count: %s, success count: %s, failure count: %s.'\r
-                self._logger.info(\r
-                    display_string % (measurement, total_count, success_count, failure_count)\r
-                )\r
-        except Exception as e:\r
-            self._logger.error(str(e))\r
-\r
-    def _run(self):\r
-        collect_data = self.data\r
-        result = {}\r
-        if collect_data and self._client:\r
-            status_info = self._client.send_info(collect_data, self.measurement)\r
-            # show summary info\r
-            self.log_summary(status_info)\r
-            # write sub_agent buffer\r
-            total_count = status_info['success_count'] + status_info['failure_count']\r
-            if total_count:\r
-                if status_info['success_count'] == 0:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_FAILED,\r
-                         'reason': 'failed to send metrics data to the server'}\r
-                elif status_info['failure_count'] == 0:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_OK}\r
-                else:\r
-                    self._module_inst.status = \\r
-                        {'status': DP_MGR_STAT_WARNING,\r
-                         'reason': 'failed to send partial metrics data to the server'}\r
-        return result\r
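
The status handling in _run() reduces to a small pure function of the send
counters; a distilled restatement (the DP_MGR_STAT_* values here are
placeholders, the real ones lived in the removed common package):

    DP_MGR_STAT_OK = 'OK'
    DP_MGR_STAT_WARNING = 'WARNING'
    DP_MGR_STAT_FAILED = 'FAILED'

    def summarize_send(success_count, failure_count):
        # mirrors MetricsAgent._run: sending nothing leaves status untouched
        if success_count + failure_count == 0:
            return None
        if success_count == 0:
            return {'status': DP_MGR_STAT_FAILED,
                    'reason': 'failed to send metrics data to the server'}
        if failure_count == 0:
            return {'status': DP_MGR_STAT_OK}
        return {'status': DP_MGR_STAT_WARNING,
                'reason': 'failed to send partial metrics data to the server'}

    assert summarize_send(5, 0) == {'status': DP_MGR_STAT_OK}
    assert summarize_send(3, 2)['status'] == DP_MGR_STAT_WARNING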
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_cluster.py
deleted file mode 100644 (file)
index 2491644..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephCluster(MetricsField):\r
-    """ Ceph cluster structure """\r
-    measurement = 'ceph_cluster'\r
-\r
-    def __init__(self):\r
-        super(CephCluster, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['cluster_health'] = ''\r
-        self.fields['num_mon'] = None\r
-        self.fields['num_mon_quorum'] = None\r
-        self.fields['num_osd'] = None\r
-        self.fields['num_osd_up'] = None\r
-        self.fields['num_osd_in'] = None\r
-        self.fields['osd_epoch'] = None\r
-        self.fields['osd_bytes'] = None\r
-        self.fields['osd_bytes_used'] = None\r
-        self.fields['osd_bytes_avail'] = None\r
-        self.fields['num_pool'] = None\r
-        self.fields['num_pg'] = None\r
-        self.fields['num_pg_active_clean'] = None\r
-        self.fields['num_pg_active'] = None\r
-        self.fields['num_pg_peering'] = None\r
-        self.fields['num_object'] = None\r
-        self.fields['num_object_degraded'] = None\r
-        self.fields['num_object_misplaced'] = None\r
-        self.fields['num_object_unfound'] = None\r
-        self.fields['num_bytes'] = None\r
-        self.fields['num_mds_up'] = None\r
-        self.fields['num_mds_in'] = None\r
-        self.fields['num_mds_failed'] = None\r
-        self.fields['mds_epoch'] = None\r
-\r
-\r
-class CephClusterAgent(MetricsAgent):\r
-    measurement = 'ceph_cluster'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-\r
-        c_data = CephCluster()\r
-        cluster_state = obj_api.get_health_status()\r
-        c_data.tags['cluster_id'] = cluster_id\r
-        c_data.fields['cluster_health'] = str(cluster_state)\r
-        c_data.fields['agenthost'] = socket.gethostname()\r
-        c_data.tags['agenthost_domain_id'] = cluster_id\r
-        c_data.fields['osd_epoch'] = obj_api.get_osd_epoch()\r
-        c_data.fields['num_mon'] = len(obj_api.get_mons())\r
-        c_data.fields['num_mon_quorum'] = \\r
-            len(obj_api.get_mon_status().get('quorum', []))\r
-\r
-        osds = obj_api.get_osds()\r
-        num_osd_up = 0\r
-        num_osd_in = 0\r
-        for osd_data in osds:\r
-            if osd_data.get('up'):\r
-                num_osd_up = num_osd_up + 1\r
-            if osd_data.get('in'):\r
-                num_osd_in = num_osd_in + 1\r
-        if osds:\r
-            c_data.fields['num_osd'] = len(osds)\r
-        else:\r
-            c_data.fields['num_osd'] = 0\r
-        c_data.fields['num_osd_up'] = num_osd_up\r
-        c_data.fields['num_osd_in'] = num_osd_in\r
-        c_data.fields['num_pool'] = len(obj_api.get_osd_pools())\r
-\r
-        df_stats = obj_api.module.get('df').get('stats', {})\r
-        total_bytes = df_stats.get('total_bytes', 0)\r
-        total_used_bytes = df_stats.get('total_used_bytes', 0)\r
-        total_avail_bytes = df_stats.get('total_avail_bytes', 0)\r
-        c_data.fields['osd_bytes'] = total_bytes\r
-        c_data.fields['osd_bytes_used'] = total_used_bytes\r
-        c_data.fields['osd_bytes_avail'] = total_avail_bytes\r
-        if total_bytes and total_avail_bytes:\r
-            c_data.fields['osd_bytes_used_percentage'] = \\r
-                round((float(total_used_bytes) / float(total_bytes)) * 100, 4)\r
-        else:\r
-            c_data.fields['osd_bytes_used_percentage'] = 0.0000\r
-\r
-        pg_stats = obj_api.module.get('pg_stats').get('pg_stats', [])\r
-        num_bytes = 0\r
-        num_object = 0\r
-        num_object_degraded = 0\r
-        num_object_misplaced = 0\r
-        num_object_unfound = 0\r
-        num_pg_active = 0\r
-        num_pg_active_clean = 0\r
-        num_pg_peering = 0\r
-        for pg_data in pg_stats:\r
-            num_pg_active = num_pg_active + len(pg_data.get('acting'))\r
-            if 'active+clean' in pg_data.get('state'):\r
-                num_pg_active_clean = num_pg_active_clean + 1\r
-            if 'peering' in pg_data.get('state'):\r
-                num_pg_peering = num_pg_peering + 1\r
-\r
-            stat_sum = pg_data.get('stat_sum', {})\r
-            num_object = num_object + stat_sum.get('num_objects', 0)\r
-            num_object_degraded = \\r
-                num_object_degraded + stat_sum.get('num_objects_degraded', 0)\r
-            num_object_misplaced = \\r
-                num_object_misplaced + stat_sum.get('num_objects_misplaced', 0)\r
-            num_object_unfound = \\r
-                num_object_unfound + stat_sum.get('num_objects_unfound', 0)\r
-            num_bytes = num_bytes + stat_sum.get('num_bytes', 0)\r
-\r
-        c_data.fields['num_pg'] = len(pg_stats)\r
-        c_data.fields['num_object'] = num_object\r
-        c_data.fields['num_object_degraded'] = num_object_degraded\r
-        c_data.fields['num_object_misplaced'] = num_object_misplaced\r
-        c_data.fields['num_object_unfound'] = num_object_unfound\r
-        c_data.fields['num_bytes'] = num_bytes\r
-        c_data.fields['num_pg_active'] = num_pg_active\r
-        c_data.fields['num_pg_active_clean'] = num_pg_active_clean\r
-        c_data.fields['num_pg_peering'] = num_pg_peering\r
-\r
-        filesystems = obj_api.get_file_systems()\r
-        num_mds_in = 0\r
-        num_mds_up = 0\r
-        num_mds_failed = 0\r
-        mds_epoch = 0\r
-        for fs_data in filesystems:\r
-            num_mds_in = \\r
-                num_mds_in + len(fs_data.get('mdsmap', {}).get('in', []))\r
-            num_mds_up = \\r
-                num_mds_up + len(fs_data.get('mdsmap', {}).get('up', {}))\r
-            num_mds_failed = \\r
-                num_mds_failed + len(fs_data.get('mdsmap', {}).get('failed', []))\r
-            mds_epoch = mds_epoch + fs_data.get('mdsmap', {}).get('epoch', 0)\r
-        c_data.fields['num_mds_in'] = num_mds_in\r
-        c_data.fields['num_mds_up'] = num_mds_up\r
-        c_data.fields['num_mds_failed'] = num_mds_failed\r
-        c_data.fields['mds_epoch'] = mds_epoch\r
-        self.data.append(c_data)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_mon_osd.py
deleted file mode 100644 (file)
index 4b4d8fa..0000000
+++ /dev/null
@@ -1,222 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephMON(MetricsField):\r
-    """ Ceph monitor structure """\r
-    measurement = 'ceph_mon'\r
-\r
-    def __init__(self):\r
-        super(CephMON, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['mon_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['num_sessions'] = None\r
-        self.fields['session_add'] = None\r
-        self.fields['session_rm'] = None\r
-        self.fields['session_trim'] = None\r
-        self.fields['num_elections'] = None\r
-        self.fields['election_call'] = None\r
-        self.fields['election_win'] = None\r
-        self.fields['election_lose'] = None\r
-\r
-\r
-class CephErasureProfile(MetricsField):\r
-    """ Ceph osd erasure profile """\r
-    measurement = 'ceph_erasure_profile'\r
-\r
-    def __init__(self):\r
-        super(CephErasureProfile, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['host_domain_id'] = None\r
-        self.fields['name'] = None\r
-\r
-\r
-class CephOsdTree(MetricsField):\r
-    """ Ceph osd tree map """\r
-    measurement = 'ceph_osd_tree'\r
-\r
-    def __init__(self):\r
-        super(CephOsdTree, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['host_domain_id'] = None\r
-        self.fields['name'] = None\r
-\r
-\r
-class CephOSD(MetricsField):\r
-    """ Ceph osd structure """\r
-    measurement = 'ceph_osd'\r
-\r
-    def __init__(self):\r
-        super(CephOSD, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['osd_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['host_domain_id'] = None\r
-        self.fields['op_w'] = None\r
-        self.fields['op_in_bytes'] = None\r
-        self.fields['op_r'] = None\r
-        self.fields['op_out_bytes'] = None\r
-        self.fields['op_wip'] = None\r
-        self.fields['op_latency'] = None\r
-        self.fields['op_process_latency'] = None\r
-        self.fields['op_r_latency'] = None\r
-        self.fields['op_r_process_latency'] = None\r
-        self.fields['op_w_in_bytes'] = None\r
-        self.fields['op_w_latency'] = None\r
-        self.fields['op_w_process_latency'] = None\r
-        self.fields['op_w_prepare_latency'] = None\r
-        self.fields['op_rw'] = None\r
-        self.fields['op_rw_in_bytes'] = None\r
-        self.fields['op_rw_out_bytes'] = None\r
-        self.fields['op_rw_latency'] = None\r
-        self.fields['op_rw_process_latency'] = None\r
-        self.fields['op_rw_prepare_latency'] = None\r
-        self.fields['op_before_queue_op_lat'] = None\r
-        self.fields['op_before_dequeue_op_lat'] = None\r
-\r
-\r
-class CephMonOsdAgent(MetricsAgent):\r
-    measurement = 'ceph_mon_osd'\r
-\r
-    # counter types\r
-    PERFCOUNTER_LONGRUNAVG = 4\r
-    PERFCOUNTER_COUNTER = 8\r
-    PERFCOUNTER_HISTOGRAM = 0x10\r
-    PERFCOUNTER_TYPE_MASK = ~3\r
-\r
-    def _stattype_to_str(self, stattype):\r
-        typeonly = stattype & self.PERFCOUNTER_TYPE_MASK\r
-        if typeonly == 0:\r
-            return 'gauge'\r
-        if typeonly == self.PERFCOUNTER_LONGRUNAVG:\r
-            # this lie matches the DaemonState decoding: only val, no counts\r
-            return 'counter'\r
-        if typeonly == self.PERFCOUNTER_COUNTER:\r
-            return 'counter'\r
-        if typeonly == self.PERFCOUNTER_HISTOGRAM:\r
-            return 'histogram'\r
-        return ''\r
-\r
-    def _generate_osd_erasure_profile(self, cluster_id):\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        osd_map = obj_api.module.get('osd_map')\r
-        if osd_map:\r
-            for n, n_value in osd_map.get('erasure_code_profiles', {}).items():\r
-                e_osd = CephErasureProfile()\r
-                e_osd.fields['name'] = n\r
-                e_osd.tags['cluster_id'] = cluster_id\r
-                e_osd.fields['agenthost'] = socket.gethostname()\r
-                e_osd.tags['agenthost_domain_id'] = cluster_id\r
-                e_osd.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
-                for k in n_value.keys():\r
-                    e_osd.fields[k] = str(n_value[k])\r
-                self.data.append(e_osd)\r
-\r
-    def _generate_osd_tree(self, cluster_id):\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        osd_tree = obj_api.module.get('osd_map_tree')\r
-        if osd_tree:\r
-            for node in osd_tree.get('nodes', []):\r
-                n_node = CephOsdTree()\r
-                n_node.tags['cluster_id'] = cluster_id\r
-                n_node.fields['agenthost'] = socket.gethostname()\r
-                n_node.tags['agenthost_domain_id'] = cluster_id\r
-                n_node.tags['host_domain_id'] = '%s_%s' % (cluster_id, socket.gethostname())\r
-                n_node.fields['children'] = ','.join(str(x) for x in node.get('children', []))\r
-                n_node.fields['type_id'] = str(node.get('type_id', ''))\r
-                n_node.fields['id'] = str(node.get('id', ''))\r
-                n_node.fields['name'] = str(node.get('name', ''))\r
-                n_node.fields['type'] = str(node.get('type', ''))\r
-                n_node.fields['reweight'] = float(node.get('reweight', 0.0))\r
-                n_node.fields['crush_weight'] = float(node.get('crush_weight', 0.0))\r
-                n_node.fields['primary_affinity'] = float(node.get('primary_affinity', 0.0))\r
-                n_node.fields['device_class'] = str(node.get('device_class', ''))\r
-                self.data.append(n_node)\r
-\r
-    def _generate_osd(self, cluster_id, service_name, perf_counts):\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        service_id = service_name[4:]\r
-        d_osd = CephOSD()\r
-        stat_bytes = 0\r
-        stat_bytes_used = 0\r
-        d_osd.tags['cluster_id'] = cluster_id\r
-        d_osd.tags['osd_id'] = service_name[4:]\r
-        d_osd.fields['agenthost'] = socket.gethostname()\r
-        d_osd.tags['agenthost_domain_id'] = cluster_id\r
-        d_osd.tags['host_domain_id'] = \\r
-            '%s_%s' % (cluster_id,\r
-                       obj_api.get_osd_hostname(d_osd.tags['osd_id']))\r
-\r
-        for i_key, i_val in perf_counts.items():\r
-            if i_key[:4] == 'osd.':\r
-                key_name = i_key[4:]\r
-            else:\r
-                key_name = i_key\r
-            if self._stattype_to_str(i_val['type']) == 'counter':\r
-                value = obj_api.get_rate('osd', service_id, i_key)\r
-            else:\r
-                value = obj_api.get_latest('osd', service_id, i_key)\r
-            if key_name == 'stat_bytes':\r
-                stat_bytes = value\r
-            elif key_name == 'stat_bytes_used':\r
-                stat_bytes_used = value\r
-            else:\r
-                d_osd.fields[key_name] = float(value)\r
-\r
-        if stat_bytes and stat_bytes_used:\r
-            d_osd.fields['stat_bytes_used_percentage'] = \\r
-                round((float(stat_bytes_used) / float(stat_bytes)) * 100, 4)\r
-        else:\r
-            d_osd.fields['stat_bytes_used_percentage'] = 0.0000\r
-        self.data.append(d_osd)\r
-\r
-    def _generate_mon(self, cluster_id, service_name, perf_counts):\r
-        d_mon = CephMON()\r
-        d_mon.tags['cluster_id'] = cluster_id\r
-        d_mon.tags['mon_id'] = service_name[4:]\r
-        d_mon.fields['agenthost'] = socket.gethostname()\r
-        d_mon.tags['agenthost_domain_id'] = cluster_id\r
-        d_mon.fields['num_sessions'] = \\r
-            perf_counts.get('mon.num_sessions', {}).get('value', 0)\r
-        d_mon.fields['session_add'] = \\r
-            perf_counts.get('mon.session_add', {}).get('value', 0)\r
-        d_mon.fields['session_rm'] = \\r
-            perf_counts.get('mon.session_rm', {}).get('value', 0)\r
-        d_mon.fields['session_trim'] = \\r
-            perf_counts.get('mon.session_trim', {}).get('value', 0)\r
-        d_mon.fields['num_elections'] = \\r
-            perf_counts.get('mon.num_elections', {}).get('value', 0)\r
-        d_mon.fields['election_call'] = \\r
-            perf_counts.get('mon.election_call', {}).get('value', 0)\r
-        d_mon.fields['election_win'] = \\r
-            perf_counts.get('mon.election_win', {}).get('value', 0)\r
-        d_mon.fields['election_lose'] = \\r
-            perf_counts.get('mon.election_lose', {}).get('value', 0)\r
-        self.data.append(d_mon)\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        perf_data = obj_api.module.get_all_perf_counters(services=('mon', 'osd'))\r
-        if not perf_data or not isinstance(perf_data, dict):\r
-            self._logger.error('unable to get all perf counters')\r
-            return\r
-        cluster_id = obj_api.get_cluster_id()\r
-        for n_name, i_perf in perf_data.items():\r
-            if n_name[0:3].lower() == 'mon':\r
-                self._generate_mon(cluster_id, n_name, i_perf)\r
-            elif n_name[0:3].lower() == 'osd':\r
-                self._generate_osd(cluster_id, n_name, i_perf)\r
-        self._generate_osd_erasure_profile(cluster_id)\r
-        self._generate_osd_tree(cluster_id)\r
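
The PERFCOUNTER_* constants mirror ceph-mgr's perf-counter flags: masking with
PERFCOUNTER_TYPE_MASK (~3) clears the two low bits (the value type) before the
type flag is compared. A short standalone check of that mapping (constants
copied from the file above):

    PERFCOUNTER_LONGRUNAVG = 4
    PERFCOUNTER_COUNTER = 8
    PERFCOUNTER_HISTOGRAM = 0x10
    PERFCOUNTER_TYPE_MASK = ~3

    def stattype_to_str(stattype):
        typeonly = stattype & PERFCOUNTER_TYPE_MASK
        if typeonly == 0:
            return 'gauge'
        if typeonly in (PERFCOUNTER_LONGRUNAVG, PERFCOUNTER_COUNTER):
            return 'counter'
        if typeonly == PERFCOUNTER_HISTOGRAM:
            return 'histogram'
        return ''

    # a counter whose value-type bits are set is still a counter,
    # while bare value-type bits map to 'gauge'
    assert stattype_to_str(PERFCOUNTER_COUNTER | 2) == 'counter'
    assert stattype_to_str(2) == 'gauge'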
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/ceph_pool.py
deleted file mode 100644 (file)
index e8b3956..0000000
+++ /dev/null
@@ -1,57 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class CephPool(MetricsField):\r
-    """ Ceph pool structure """\r
-    measurement = 'ceph_pool'\r
-\r
-    def __init__(self):\r
-        super(CephPool, self).__init__()\r
-        self.tags['cluster_id'] = None\r
-        self.tags['pool_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['bytes_used'] = None\r
-        self.fields['max_avail'] = None\r
-        self.fields['objects'] = None\r
-        self.fields['wr_bytes'] = None\r
-        self.fields['dirty'] = None\r
-        self.fields['rd_bytes'] = None\r
-        self.fields['stored_raw'] = None\r
-\r
-\r
-class CephPoolAgent(MetricsAgent):\r
-    measurement = 'ceph_pool'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        df_data = obj_api.module.get('df')\r
-        cluster_id = obj_api.get_cluster_id()\r
-        for pool in df_data.get('pools', []):\r
-            d_pool = CephPool()\r
-            p_id = pool.get('id')\r
-            d_pool.tags['cluster_id'] = cluster_id\r
-            d_pool.tags['pool_id'] = p_id\r
-            d_pool.fields['agenthost'] = socket.gethostname()\r
-            d_pool.tags['agenthost_domain_id'] = cluster_id\r
-            d_pool.fields['bytes_used'] = \\r
-                pool.get('stats', {}).get('bytes_used', 0)\r
-            d_pool.fields['max_avail'] = \\r
-                pool.get('stats', {}).get('max_avail', 0)\r
-            d_pool.fields['objects'] = \\r
-                pool.get('stats', {}).get('objects', 0)\r
-            d_pool.fields['wr_bytes'] = \\r
-                pool.get('stats', {}).get('wr_bytes', 0)\r
-            d_pool.fields['dirty'] = \\r
-                pool.get('stats', {}).get('dirty', 0)\r
-            d_pool.fields['rd_bytes'] = \\r
-                pool.get('stats', {}).get('rd_bytes', 0)\r
-            d_pool.fields['stored_raw'] = \\r
-                pool.get('stats', {}).get('stored_raw', 0)\r
-            self.data.append(d_pool)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/db_relay.py
deleted file mode 100644 (file)
index 2f5d60d..0000000
+++ /dev/null
@@ -1,703 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import re\r
-import socket\r
-\r
-from . import MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-from ...common.cypher import CypherOP, NodeInfo\r
-\r
-\r
-class BaseDP(object):\r
-    """ basic diskprediction structure """\r
-    _fields = []\r
-\r
-    def __init__(self, *args, **kwargs):\r
-        if len(args) > len(self._fields):\r
-            raise TypeError('Expected {} arguments'.format(len(self._fields)))\r
-\r
-        for name, value in zip(self._fields, args):\r
-            setattr(self, name, value)\r
-\r
-        for name in self._fields[len(args):]:\r
-            setattr(self, name, kwargs.pop(name))\r
-\r
-        if kwargs:\r
-            raise TypeError('Invalid argument(s): {}'.format(','.join(kwargs)))\r
-\r
-\r
-class MGRDpCeph(BaseDP):\r
-    _fields = [\r
-        'fsid', 'health', 'max_osd', 'size',\r
-        'avail_size', 'raw_used', 'raw_used_percent'\r
-    ]\r
-\r
-\r
-class MGRDpHost(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpMon(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpOsd(BaseDP):\r
-    _fields = [\r
-        'fsid', 'host', '_id', 'uuid', 'up', '_in', 'weight', 'public_addr',\r
-        'cluster_addr', 'state', 'ceph_release', 'osd_devices', 'rotational'\r
-    ]\r
-\r
-\r
-class MGRDpMds(BaseDP):\r
-    _fields = ['fsid', 'host', 'ipaddr']\r
-\r
-\r
-class MGRDpPool(BaseDP):\r
-    _fields = [\r
-        'fsid', 'size', 'pool_name', 'pool_id', 'type', 'min_size',\r
-        'pg_num', 'pgp_num', 'created_time', 'pgids', 'osd_ids', 'tiers', 'cache_mode',\r
-        'erasure_code_profile', 'tier_of'\r
-    ]\r
-\r
-\r
-class MGRDpRBD(BaseDP):\r
-    _fields = ['fsid', '_id', 'name', 'pool_name', 'pool_id']\r
-\r
-\r
-class MGRDpFS(BaseDP):\r
-    _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes']\r
-\r
-\r
-class MGRDpPG(BaseDP):\r
-    _fields = [\r
-        'fsid', 'pgid', 'up_osds', 'acting_osds', 'state',\r
-        'objects', 'degraded', 'misplaced', 'unfound'\r
-    ]\r
-\r
-\r
-class MGRDpDisk(BaseDP):\r
-    _fields = ['host_domain_id', 'host', 'fs_journal_osd', 'bs_db_osd', 'bs_wal_osd', 'data_osd', 'osd_ids']\r
-\r
-\r
-class DBRelay(MetricsField):\r
-    """ DB Relay structure """\r
-    measurement = 'db_relay'\r
-\r
-    def __init__(self):\r
-        super(DBRelay, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['dc_tag'] = 'na'\r
-        self.tags['host'] = None\r
-        self.fields['cmd'] = None\r
-\r
-\r
-class DBRelayAgent(MetricsAgent):\r
-    measurement = 'db_relay'\r
-\r
-    def __init__(self, *args, **kwargs):\r
-        super(DBRelayAgent, self).__init__(*args, **kwargs)\r
-        self._cluster_node = None\r
-        self._cluster_id = None\r
-        self._ceph = ClusterAPI(self._module_inst)\r
-        self._osd_maps = self._ceph.module.get('osd_map')\r
-        self._mon_maps = self._ceph.module.get('mon_map')\r
-        self._fs_maps = self._ceph.module.get('fs_map')\r
-        self._osd_metadata = self._ceph.module.get('osd_metadata')\r
-        self._host_nodes = dict()\r
-        self._osd_nodes = dict()\r
-        self._mon_nodes = dict()\r
-        self._mds_nodes = dict()\r
-        self._dev_nodes = dict()\r
-        self._pool_nodes = dict()\r
-        self._rbd_nodes = dict()\r
-        self._fs_nodes = dict()\r
-        # initial ceph all node states\r
-        self._init_cluster_node()\r
-        self._init_hosts()\r
-        self._init_mons()\r
-        self._init_mds()\r
-        self._init_osds()\r
-        self._init_devices()\r
-        self._init_pools()\r
-        self._init_rbds()\r
-        self._init_fs()\r
-\r
-    def _init_hosts(self):\r
-        hosts = set()\r
-        # Add host from osd\r
-        osd_data = self._osd_maps.get('osds', [])\r
-        for _data in osd_data:\r
-            osd_id = _data['osd']\r
-            if not _data.get('in'):\r
-                continue\r
-            osd_addr = _data['public_addr'].split(':')[0]\r
-            osd_metadata = self._ceph.get_osd_metadata(osd_id)\r
-            if osd_metadata:\r
-                osd_host = osd_metadata['hostname']\r
-                hosts.add((osd_host, osd_addr))\r
-\r
-        # Add host from mon\r
-        mons = self._mon_maps.get('mons', [])\r
-        for _data in mons:\r
-            mon_host = _data['name']\r
-            mon_addr = _data['public_addr'].split(':')[0]\r
-            if mon_host:\r
-                hosts.add((mon_host, mon_addr))\r
-\r
-        # Add host from mds\r
-        file_systems = self._fs_maps.get('filesystems', [])\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap').get('info')\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                if mds_host:\r
-                    hosts.add((mds_host, mds_addr))\r
-        for tp in hosts:\r
-            host = tp[0]\r
-            self._host_nodes[host] = None\r
-\r
-            host_node = NodeInfo(\r
-                label='VMHost',\r
-                domain_id='{}_{}'.format(self._cluster_id, host),\r
-                name=host,\r
-                meta={}\r
-            )\r
-            self._host_nodes[host] = host_node\r
-\r
-    def _init_mons(self):\r
-        cluster_id = self._cluster_id\r
-        mons = self._mon_maps.get('mons')\r
-        for mon in mons:\r
-            mon_name = mon.get('name', '')\r
-            mon_addr = mon.get('addr', '').split(':')[0]\r
-            if mon_name not in self._host_nodes.keys():\r
-                continue\r
-\r
-            dp_mon = MGRDpMon(\r
-                fsid=cluster_id,\r
-                host=mon_name,\r
-                ipaddr=mon_addr\r
-            )\r
-\r
-            # create mon node\r
-            mon_node = NodeInfo(\r
-                label='CephMon',\r
-                domain_id='{}.mon.{}'.format(cluster_id, mon_name),\r
-                name=mon_name,\r
-                meta=dp_mon.__dict__\r
-            )\r
-            self._mon_nodes[mon_name] = mon_node\r
-\r
-    def _init_mds(self):\r
-        cluster_id = self._cluster_id\r
-        file_systems = self._fs_maps.get('filesystems', [])\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap').get('info')\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                mds_gid = mds_data.get('gid')\r
-\r
-                if mds_host not in self._host_nodes:\r
-                    continue\r
-\r
-                dp_mds = MGRDpMds(\r
-                    fsid=cluster_id,\r
-                    host=mds_host,\r
-                    ipaddr=mds_addr\r
-                )\r
-\r
-                # create osd node\r
-                mds_node = NodeInfo(\r
-                    label='CephMds',\r
-                    domain_id='{}.mds.{}'.format(cluster_id, mds_gid),\r
-                    name='MDS.{}'.format(mds_gid),\r
-                    meta=dp_mds.__dict__\r
-                )\r
-                self._mds_nodes[mds_host] = mds_node\r
-\r
-    def _init_osds(self):\r
-        for osd in self._osd_maps.get('osds', []):\r
-            osd_id = osd.get('osd', -1)\r
-            meta = self._osd_metadata.get(str(osd_id), {})\r
-            osd_host = meta['hostname']\r
-            osd_ceph_version = meta['ceph_version']\r
-            osd_rotational = meta['rotational']\r
-            osd_devices = meta['devices'].split(',')\r
-\r
-            # filter 'dm' device.\r
-            devices = []\r
-            for devname in osd_devices:\r
-                if 'dm' in devname:\r
-                    continue\r
-                devices.append(devname)\r
-\r
-            if osd_host not in self._host_nodes.keys():\r
-                continue\r
-            self._osd_nodes[str(osd_id)] = None\r
-            public_addr = []\r
-            cluster_addr = []\r
-            for addr in osd.get('public_addrs', {}).get('addrvec', []):\r
-                public_addr.append(addr.get('addr'))\r
-            for addr in osd.get('cluster_addrs', {}).get('addrvec', []):\r
-                cluster_addr.append(addr.get('addr'))\r
-            dp_osd = MGRDpOsd(\r
-                fsid=self._cluster_id,\r
-                host=osd_host,\r
-                _id=osd_id,\r
-                uuid=osd.get('uuid'),\r
-                up=osd.get('up'),\r
-                _in=osd.get('in'),\r
-                weight=osd.get('weight'),\r
-                public_addr=','.join(public_addr),\r
-                cluster_addr=','.join(cluster_addr),\r
-                state=','.join(osd.get('state', [])),\r
-                ceph_release=osd_ceph_version,\r
-                osd_devices=','.join(devices),\r
-                rotational=osd_rotational)\r
-            for k, v in meta.items():\r
-                setattr(dp_osd, k, v)\r
-\r
-            # create osd node\r
-            osd_node = NodeInfo(\r
-                label='CephOsd',\r
-                domain_id='{}.osd.{}'.format(self._cluster_id, osd_id),\r
-                name='OSD.{}'.format(osd_id),\r
-                meta=dp_osd.__dict__\r
-            )\r
-            self._osd_nodes[str(osd_id)] = osd_node\r
-\r
-    def _init_devices(self):\r
-        r = re.compile(r'[^/dev]\D+')  # extract the bare device name, e.g. 'sda' from '/dev/sda1'\r
-        for osdid, o_val in self._osd_nodes.items():\r
-            o_devs = o_val.meta.get('device_ids', '').split(',')\r
-            # fs_store\r
-            journal_devs = o_val.meta.get('backend_filestore_journal_dev_node', '').split(',')\r
-            # bs_store\r
-            bs_db_devs = o_val.meta.get('bluefs_db_dev_node', '').split(',')\r
-            bs_wal_devs = o_val.meta.get('bluefs_wal_dev_node', '').split(',')\r
-\r
-            for dev in o_devs:\r
-                fs_journal = []\r
-                bs_db = []\r
-                bs_wal = []\r
-                data = []\r
-                if len(dev.split('=')) != 2:\r
-                    continue\r
-                dev_name = dev.split('=')[0]\r
-                dev_id = dev.split('=')[1]\r
-                if not dev_id:\r
-                    continue\r
-\r
-                for j_dev in journal_devs:\r
-                    if dev_name == ''.join(r.findall(j_dev)):\r
-                        fs_journal.append(osdid)\r
-                for db_dev in bs_db_devs:\r
-                    if dev_name == ''.join(r.findall(db_dev)):\r
-                        bs_db.append(osdid)\r
-                for wal_dev in bs_wal_devs:\r
-                    if dev_name == ''.join(r.findall(wal_dev)):\r
-                        bs_wal.append(osdid)\r
-\r
-                if not fs_journal and not bs_db and not bs_wal:\r
-                    data.append(osdid)\r
-\r
-                disk_domain_id = dev_id\r
-                if disk_domain_id not in self._dev_nodes.keys():\r
-                    dp_disk = MGRDpDisk(\r
-                        host_domain_id='{}_{}'.format(self._cluster_id, o_val.meta.get('host')),\r
-                        host=o_val.meta.get('host'),\r
-                        osd_ids=osdid,\r
-                        fs_journal_osd=','.join(str(x) for x in fs_journal) if fs_journal else '',\r
-                        bs_db_osd=','.join(str(x) for x in bs_db) if bs_db else '',\r
-                        bs_wal_osd=','.join(str(x) for x in bs_wal) if bs_wal else '',\r
-                        data_osd=','.join(str(x) for x in data) if data else ''\r
-                    )\r
-                    # create disk node\r
-                    disk_node = NodeInfo(\r
-                        label='VMDisk',\r
-                        domain_id=disk_domain_id,\r
-                        name=dev_name,\r
-                        meta=dp_disk.__dict__\r
-                    )\r
-                    self._dev_nodes[disk_domain_id] = disk_node\r
-                else:\r
-                    dev_node = self._dev_nodes[disk_domain_id]\r
-                    osd_ids = dev_node.meta.get('osd_ids', '')\r
-                    if osdid not in osd_ids.split(','):\r
-                        arr_value = osd_ids.split(',')\r
-                        arr_value.append(str(osdid))\r
-                        dev_node.meta['osd_ids'] = ','.join(arr_value)\r
-                    if fs_journal:\r
-                        arr_value = None\r
-                        for t in fs_journal:\r
-                            value = dev_node.meta.get('fs_journal_osd', '')\r
-                            if value:\r
-                                arr_value = value.split(',')\r
-                            else:\r
-                                arr_value = []\r
-                            if t not in arr_value:\r
-                                arr_value.append(t)\r
-                        if arr_value:\r
-                            dev_node.meta['fs_journal_osd'] = ','.join(str(x) for x in arr_value)\r
-                    if bs_db:\r
-                        arr_value = None\r
-                        for t in bs_db:\r
-                            value = dev_node.meta.get('bs_db_osd', '')\r
-                            if value:\r
-                                arr_value = value.split(',')\r
-                            else:\r
-                                arr_value = []\r
-                            if t not in arr_value:\r
-                                arr_value.append(t)\r
-                        if arr_value:\r
-                            dev_node.meta['bs_db_osd'] = ','.join(str(x) for x in arr_value)\r
-                    if bs_wal:\r
-                        arr_value = None\r
-                        for t in bs_wal:\r
-                            value = dev_node.meta.get('bs_wal_osd', '')\r
-                            if value:\r
-                                arr_value = value.split(',')\r
-                            else:\r
-                                arr_value = []\r
-                            if t not in arr_value:\r
-                                arr_value.append(t)\r
-                        if arr_value:\r
-                            dev_node.meta['bs_wal_osd'] = ','.join(str(x) for x in arr_value)\r
-                    if data:\r
-                        arr_value = None\r
-                        for t in data:\r
-                            value = dev_node.meta.get('data_osd', '')\r
-                            if value:\r
-                                arr_value = value.split(',')\r
-                            else:\r
-                                arr_value = []\r
-                            if t not in arr_value:\r
-                                arr_value.append(t)\r
-                        if arr_value:\r
-                            dev_node.meta['data_osd'] = ','.join(str(x) for x in arr_value)\r
-\r
-    def _init_cluster_node(self):\r
-        cluster_id = self._ceph.get_cluster_id()\r
-        ceph_df_stat = self._ceph.get_ceph_df_state()\r
-        dp_cluster = MGRDpCeph(\r
-            fsid=cluster_id,\r
-            health=self._ceph.get_health_status(),\r
-            max_osd=len(self._ceph.get_osds()),\r
-            size=ceph_df_stat.get('total_size'),\r
-            avail_size=ceph_df_stat.get('avail_size'),\r
-            raw_used=ceph_df_stat.get('raw_used_size'),\r
-            raw_used_percent=ceph_df_stat.get('used_percent')\r
-        )\r
-        cluster_name = cluster_id[-12:]\r
-        cluster_node = NodeInfo(\r
-            label='CephCluster',\r
-            domain_id=cluster_id,\r
-            name='cluster-{}'.format(cluster_name),\r
-            meta=dp_cluster.__dict__\r
-        )\r
-        self._cluster_id = cluster_id\r
-        self._cluster_node = cluster_node\r
-\r
-    def _init_pools(self):\r
-        pools = self._osd_maps.get('pools', [])\r
-        cluster_id = self._cluster_id\r
-        for pool in pools:\r
-            osds = []\r
-            pgs = self._ceph.get_pgs_up_by_poolid(int(pool.get('pool', -1)))\r
-            for pg_id, osd_id in pgs.items():\r
-                for o_id in osd_id:\r
-                    if o_id not in osds:\r
-                        osds.append(str(o_id))\r
-            dp_pool = MGRDpPool(\r
-                fsid=cluster_id,\r
-                size=pool.get('size'),\r
-                pool_name=pool.get('pool_name'),\r
-                pool_id=pool.get('pool'),\r
-                type=pool.get('type'),\r
-                min_size=pool.get('min_size'),\r
-                pg_num=pool.get('pg_num'),\r
-                pgp_num=pool.get('pg_placement_num'),\r
-                created_time=pool.get('create_time'),\r
-                pgids=','.join(pgs.keys()),\r
-                osd_ids=','.join(osds),\r
-                tiers=','.join(str(x) for x in pool.get('tiers', [])),\r
-                cache_mode=pool.get('cache_mode', ''),\r
-                erasure_code_profile=str(pool.get('erasure_code_profile', '')),\r
-                tier_of=str(pool.get('tier_of', -1)))\r
-            # create pool node\r
-            pool_node = NodeInfo(\r
-                label='CephPool',\r
-                domain_id='{}_pool_{}'.format(cluster_id, pool.get('pool')),\r
-                name=pool.get('pool_name'),\r
-                meta=dp_pool.__dict__\r
-            )\r
-            self._pool_nodes[str(pool.get('pool'))] = pool_node\r
-\r
-    def _init_rbds(self):\r
-        cluster_id = self._cluster_id\r
-        for p_id, p_node in self._pool_nodes.items():\r
-            rbds = self._ceph.get_rbd_list(p_node.name)\r
-            self._rbd_nodes[str(p_id)] = []\r
-            for rbd in rbds:\r
-                dp_rbd = MGRDpRBD(\r
-                    fsid=cluster_id,\r
-                    _id=rbd['id'],\r
-                    name=rbd['name'],\r
-                    pool_name=rbd['pool_name'],\r
-                    pool_id=p_id,\r
-                )\r
-                # create pool node\r
-                rbd_node = NodeInfo(\r
-                    label='CephRBD',\r
-                    domain_id='{}_rbd_{}'.format(cluster_id, rbd['id']),\r
-                    name=rbd['name'],\r
-                    meta=dp_rbd.__dict__,\r
-                )\r
-                self._rbd_nodes[str(p_id)].append(rbd_node)\r
-\r
-    def _init_fs(self):\r
-        # _fields = ['fsid', '_id', 'name', 'metadata_pool', 'data_pools', 'mds_nodes']\r
-        cluster_id = self._cluster_id\r
-        file_systems = self._fs_maps.get('filesystems', [])\r
-        for fs in file_systems:\r
-            mdsmap = fs.get('mdsmap', {})\r
-            mds_hostnames = []\r
-            for m, md in mdsmap.get('info', {}).items():\r
-                if md.get('name') not in mds_hostnames:\r
-                    mds_hostnames.append(md.get('name'))\r
-            dp_fs = MGRDpFS(\r
-                fsid=cluster_id,\r
-                _id=fs.get('id'),\r
-                name=mdsmap.get('fs_name'),\r
-                metadata_pool=str(mdsmap.get('metadata_pool', -1)),\r
-                data_pools=','.join(str(i) for i in mdsmap.get('data_pools', [])),\r
-                mds_nodes=','.join(mds_hostnames),\r
-            )\r
-            fs_node = NodeInfo(\r
-                label='CephFS',\r
-                domain_id='{}_fs_{}'.format(cluster_id, fs.get('id')),\r
-                name=mdsmap.get('fs_name'),\r
-                meta=dp_fs.__dict__,\r
-            )\r
-            self._fs_nodes[str(fs.get('id'))] = fs_node\r
-\r
-    def _cluster_contains_host(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_node = self._cluster_node\r
-\r
-        # create node relation\r
-        for h_id, h_node in self._host_nodes.items():\r
-            data = DBRelay()\r
-            # add osd node relationship\r
-            cypher_cmd = CypherOP.add_link(\r
-                cluster_node,\r
-                h_node,\r
-                'CephClusterContainsHost'\r
-            )\r
-            cluster_host = socket.gethostname()\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = cluster_id\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self.data.append(data)\r
-\r
-    def _host_contains_mon(self):\r
-        for m_name, m_node in self._mon_nodes.items():\r
-            host_node = self._host_nodes.get(m_name)\r
-            if not host_node:\r
-                continue\r
-            data = DBRelay()\r
-            # add mon node relationship\r
-            cypher_cmd = CypherOP.add_link(\r
-                host_node,\r
-                m_node,\r
-                'HostContainsMon'\r
-            )\r
-            cluster_host = socket.gethostname()\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = self._cluster_id\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self.data.append(data)\r
-\r
-    def _host_contains_osd(self):\r
-        cluster_id = self._cluster_id\r
-        for o_id, o_node in self._osd_nodes.items():\r
-            host_node = self._host_nodes.get(o_node.meta.get('host'))\r
-            if not host_node:\r
-                continue\r
-            data = DBRelay()\r
-            # add osd node relationship\r
-            cypher_cmd = CypherOP.add_link(\r
-                host_node,\r
-                o_node,\r
-                'HostContainsOsd'\r
-            )\r
-            cluster_host = socket.gethostname()\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = cluster_id\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self.data.append(data)\r
-\r
-    def _host_contains_mds(self):\r
-        cluster_id = self._cluster_id\r
-        for m_name, mds_node in self._mds_nodes.items():\r
-            data = DBRelay()\r
-            host_node = self._host_nodes.get(mds_node.meta.get('host'))\r
-            if not host_node:\r
-                continue\r
-            # add osd node relationship\r
-            cypher_cmd = CypherOP.add_link(\r
-                host_node,\r
-                mds_node,\r
-                'HostContainsMds'\r
-            )\r
-            cluster_host = socket.gethostname()\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = cluster_id\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self.data.append(data)\r
-\r
-    def _osd_contains_disk(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_host = socket.gethostname()\r
-        for d_name, d_node in self._dev_nodes.items():\r
-            keys = {'data_osd': 'DataDiskOfOSD',\r
-                    'fs_journal_osd': 'FsJournalDiskOfOSD',\r
-                    'bs_db_osd': 'BsDBDiskOfOSD',\r
-                    'bs_wal_osd': 'BsWalDiskOfOSD'}\r
-            for k, v in keys.items():\r
-                if not d_node.meta.get(k):\r
-                    continue\r
-                for osdid in d_node.meta.get(k, '').split(','):\r
-                    data = DBRelay()\r
-                    osd_node = self._osd_nodes.get(str(osdid))\r
-                    if not osd_node:\r
-                        continue\r
-                    # add disk node relationship\r
-                    cypher_cmd = CypherOP.add_link(\r
-                        osd_node,\r
-                        d_node,\r
-                        v)\r
-                    data.fields['agenthost'] = cluster_host\r
-                    data.tags['agenthost_domain_id'] = cluster_id\r
-                    data.tags['host'] = cluster_host\r
-                    data.fields['cmd'] = str(cypher_cmd)\r
-                    self.data.append(data)\r
-\r
-            hostname = d_node.meta.get('host', '')\r
-            if not hostname:\r
-                continue\r
-            host_node = self._host_nodes.get(hostname)\r
-            if not host_node:\r
-                continue\r
-            # add osd node relationship\r
-            data = DBRelay()\r
-            cypher_cmd = CypherOP.add_link(\r
-                host_node,\r
-                d_node,\r
-                'VmHostContainsVmDisk'\r
-            )\r
-            data.fields['agenthost'] = cluster_host\r
-            data.tags['agenthost_domain_id'] = cluster_id\r
-            data.tags['host'] = cluster_host\r
-            data.fields['cmd'] = str(cypher_cmd)\r
-            self.data.append(data)\r
-\r
-    def _pool_contains_osd(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_host = socket.gethostname()\r
-        for p_id, p_node in self._pool_nodes.items():\r
-            for o_id in p_node.meta.get('osd_ids', '').split(','):\r
-                osd_node = self._osd_nodes.get(str(o_id))\r
-                if not osd_node:\r
-                    continue\r
-                data = DBRelay()\r
-                cypher_cmd = CypherOP.add_link(\r
-                    osd_node,\r
-                    p_node,\r
-                    'OsdContainsPool'\r
-                )\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = cluster_id\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-    def _pool_contains_rbd(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_host = socket.gethostname()\r
-        for p_id, p_node in self._pool_nodes.items():\r
-            for rbd_node in self._rbd_nodes.get(str(p_id), []):\r
-                if not rbd_node:\r
-                    continue\r
-                data = DBRelay()\r
-                cypher_cmd = CypherOP.add_link(\r
-                    p_node,\r
-                    rbd_node,\r
-                    'PoolContainsRBD'\r
-                )\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = cluster_id\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-    def _pool_contains_fs(self):\r
-        cluster_id = self._cluster_id\r
-        cluster_host = socket.gethostname()\r
-        for fs_id, fs_node in self._fs_nodes.items():\r
-            pool_attrs = ['metadata_pool', 'data_pools']\r
-            for p_attr in pool_attrs:\r
-                pools_id = fs_node.meta.get(p_attr, '').split(',')\r
-                for p_id in pools_id:\r
-                    p_node = self._pool_nodes.get(str(p_id))\r
-                    if p_node:\r
-                        data = DBRelay()\r
-                        cypher_cmd = CypherOP.add_link(\r
-                            p_node,\r
-                            fs_node,\r
-                            'MetadataPoolContainsFS' if p_attr == 'metadata_pool' else 'DataPoolContainsFS'\r
-                        )\r
-                        data.fields['agenthost'] = cluster_host\r
-                        data.tags['agenthost_domain_id'] = cluster_id\r
-                        data.tags['host'] = cluster_host\r
-                        data.fields['cmd'] = str(cypher_cmd)\r
-                        self.data.append(data)\r
-            for mds_name in fs_node.meta.get('mds_nodes', '').split(','):\r
-                mds_node = self._mds_nodes.get(mds_name)\r
-                if not mds_node:\r
-                    continue\r
-                data = DBRelay()\r
-                cypher_cmd = CypherOP.add_link(\r
-                    mds_node,\r
-                    fs_node,\r
-                    'MDSContainsFS'\r
-                )\r
-                data.fields['agenthost'] = cluster_host\r
-                data.tags['agenthost_domain_id'] = cluster_id\r
-                data.tags['host'] = cluster_host\r
-                data.fields['cmd'] = str(cypher_cmd)\r
-                self.data.append(data)\r
-\r
-    def _collect_data(self):\r
-        if not self._module_inst:\r
-            return\r
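-        # each job name maps to a private method _<name> that appends\r
-        # cypher link commands to self.data\r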
-        job_name = ['cluster_contains_host', 'host_contains_mon', 'host_contains_mds', 'host_contains_osd', 'osd_contains_disk',\r
-                    'pool_contains_osd', 'pool_contains_rbd', 'pool_contains_fs']\r
-        for job in job_name:\r
-            fn = getattr(self, '_%s' % job, None)\r
-            if not fn:\r
-                continue\r
-            try:\r
-                fn()\r
-            except Exception as e:\r
-                self._module_inst.log.error('dbrelay - executing function {} failed: {}'.format(job, str(e)))\r
-                continue\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_agent.py
deleted file mode 100644 (file)
index 81fbdf9..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-from __future__ import absolute_import
-
-import socket
-import time
-
-from . import AGENT_VERSION, MetricsAgent, MetricsField
-from ...common import DP_MGR_STAT_FAILED, DP_MGR_STAT_WARNING
-from ...common.clusterdata import ClusterAPI
-
-
-class SAIAgentFields(MetricsField):
-    """ SAI DiskSmart structure """
-    measurement = 'sai_agent'
-
-    def __init__(self):
-        super(SAIAgentFields, self).__init__()
-        self.tags['agenthost_domain_id'] = None
-        self.fields['agent_type'] = str('ceph')
-        self.fields['agent_version'] = str(AGENT_VERSION)
-        self.fields['agenthost'] = ''
-        self.fields['cluster_domain_id'] = ''
-        self.fields['heartbeat_interval'] = ''
-        self.fields['host_ip'] = ''
-        self.fields['host_name'] = ''
-        self.fields['is_error'] = False
-        self.fields['is_ceph_error'] = False
-        self.fields['needs_warning'] = False
-        self.fields['send'] = None
-
-
-class SAIAgent(MetricsAgent):
-    measurement = 'sai_agent'
-
-    def _collect_data(self):
-        mgr_id = []
-        c_data = SAIAgentFields()
-        obj_api = ClusterAPI(self._module_inst)
-        svc_data = obj_api.get_server(socket.gethostname())
-        cluster_state = obj_api.get_health_status()
-        if not svc_data:
-            raise Exception('unable to get %s service info' % socket.gethostname())
-        # Filter mgr id
-        for s in svc_data.get('services', []):
-            if s.get('type', '') == 'mgr':
-                mgr_id.append(s.get('id'))
-
-        for _id in mgr_id:
-            mgr_meta = obj_api.get_mgr_metadata(_id)
-            cluster_id = obj_api.get_cluster_id()
-            c_data.fields['cluster_domain_id'] = str(cluster_id)
-            c_data.fields['agenthost'] = str(socket.gethostname())
-            c_data.tags['agenthost_domain_id'] = cluster_id
-            c_data.fields['heartbeat_interval'] = \
-                int(obj_api.get_configuration('diskprediction_upload_metrics_interval'))
-            c_data.fields['host_ip'] = str(mgr_meta.get('addr', '127.0.0.1'))
-            c_data.fields['host_name'] = str(socket.gethostname())
-            c_data.fields['is_error'] = \
-                obj_api.module.status.get('status', '') in [DP_MGR_STAT_WARNING, DP_MGR_STAT_FAILED]
-            if cluster_state in ['HEALTH_ERR', 'HEALTH_WARN']:
-                c_data.fields['is_ceph_error'] = True
-                c_data.fields['needs_warning'] = True
-                c_data.fields['is_error'] = True
-                c_data.fields['problems'] = str(obj_api.get_health_checks())
-            else:
-                c_data.fields['is_ceph_error'] = False
-            c_data.fields['send'] = int(time.time() * 1000)
-            self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_cluster.py
deleted file mode 100644 (file)
index d444f9a..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-from __future__ import absolute_import
-
-import socket
-
-from . import AGENT_VERSION, MetricsAgent, MetricsField
-from ...common.clusterdata import ClusterAPI
-
-
-class SAIClusterFields(MetricsField):
-    """ SAI Host structure """
-    measurement = 'sai_cluster'
-
-    def __init__(self):
-        super(SAIClusterFields, self).__init__()
-        self.tags['domain_id'] = None
-        self.fields['agenthost'] = None
-        self.tags['agenthost_domain_id'] = None
-        self.fields['name'] = None
-        self.fields['agent_version'] = str(AGENT_VERSION)
-
-
-class SAIClusterAgent(MetricsAgent):
-    measurement = 'sai_cluster'
-
-    def _collect_data(self):
-        c_data = SAIClusterFields()
-        obj_api = ClusterAPI(self._module_inst)
-        cluster_id = obj_api.get_cluster_id()
-
-        c_data.tags['domain_id'] = str(cluster_id)
-        c_data.tags['host_domain_id'] = '%s_%s' % (str(cluster_id), str(socket.gethostname()))
-        c_data.fields['agenthost'] = str(socket.gethostname())
-        c_data.tags['agenthost_domain_id'] = cluster_id
-        c_data.fields['name'] = 'Ceph mgr plugin'
-        self.data.append(c_data)
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk.py
deleted file mode 100644 (file)
index 3b177e6..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common import get_human_readable\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIDiskFields(MetricsField):\r
-    """ SAI Disk structure """\r
-    measurement = 'sai_disk'\r
-\r
-    def __init__(self):\r
-        super(SAIDiskFields, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['disk_domain_id'] = None\r
-        self.tags['disk_name'] = None\r
-        self.tags['disk_wwn'] = None\r
-        self.tags['primary_key'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['host_domain_id'] = None\r
-        self.fields['model'] = None\r
-        self.fields['serial_number'] = None\r
-        self.fields['size'] = None\r
-        self.fields['vendor'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-        """disk_status\r
-        0: unknown  1: good     2: failure\r
-        """\r
-        self.fields['disk_status'] = 0\r
-\r
-        """disk_type\r
-        0: unknown  1: HDD      2: SSD      3: SSD NVME\r
-        4: SSD SAS  5: SSD SATA 6: HDD SAS  7: HDD SATA\r
-        """\r
-        self.fields['disk_type'] = 0\r
-\r
-\r
-class SAIDiskAgent(MetricsAgent):\r
-    measurement = 'sai_disk'\r
-\r
-    @staticmethod\r
-    def _convert_disk_type(is_ssd, sata_version, protocol):\r
-        """ return type:\r
-            0: "Unknown', 1: 'HDD',\r
-            2: 'SSD",     3: "SSD NVME",\r
-            4: "SSD SAS", 5: "SSD SATA",\r
-            6: "HDD SAS", 7: "HDD SATA"\r
-        """\r
-        if is_ssd:\r
-            if sata_version and not protocol:\r
-                disk_type = 5\r
-            elif 'scsi' in protocol.lower():\r
-                disk_type = 4\r
-            elif 'nvme' in protocol.lower():\r
-                disk_type = 3\r
-            else:\r
-                disk_type = 2\r
-        else:\r
-            if sata_version and not protocol:\r
-                disk_type = 7\r
-            elif 'scsi' in protocol.lower():\r
-                disk_type = 6\r
-            else:\r
-                disk_type = 1\r
-        return disk_type\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-        osds = obj_api.get_osds()\r
-        for osd in osds:\r
-            if osd.get('osd') is None:\r
-                continue\r
-            if not osd.get('in'):\r
-                continue\r
-            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
-            if not osds_meta:\r
-                continue\r
-            osds_smart = obj_api.get_osd_smart(osd.get('osd'))\r
-            if not osds_smart:\r
-                continue\r
-            for dev_name, s_val in osds_smart.items():\r
-                d_data = SAIDiskFields()\r
-                d_data.tags['disk_name'] = str(dev_name)\r
-                d_data.fields['cluster_domain_id'] = str(cluster_id)\r
-                d_data.tags['host_domain_id'] = \\r
-                    str('%s_%s'\r
-                        % (cluster_id, osds_meta.get('hostname', 'None')))\r
-                d_data.fields['agenthost'] = str(socket.gethostname())\r
-                d_data.tags['agenthost_domain_id'] = cluster_id\r
-                serial_number = s_val.get('serial_number')\r
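-                # derive a WWPN-style identifier from the SMART 'wwn' section:\r
-                # start from the OUI/unit id pair and prefix the naa/t10/eui/iqn\r
-                # value when one is reported\r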
-                wwn = s_val.get('wwn', {})\r
-                wwpn = ''\r
-                if wwn:\r
-                    wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
-                    for k in wwn.keys():\r
-                        if k in ['naa', 't10', 'eui', 'iqn']:\r
-                            wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
-                            break\r
-\r
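-                # fall back from WWPN to serial number to device name when\r
-                # filling the disk_wwn and serial_number fields\r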
-                if wwpn:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.tags['disk_wwn'] = str(wwpn)\r
-                    if serial_number:\r
-                        d_data.fields['serial_number'] = str(serial_number)\r
-                    else:\r
-                        d_data.fields['serial_number'] = str(wwpn)\r
-                elif serial_number:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.fields['serial_number'] = str(serial_number)\r
-                    if wwpn:\r
-                        d_data.tags['disk_wwn'] = str(wwpn)\r
-                    else:\r
-                        d_data.tags['disk_wwn'] = str(serial_number)\r
-                else:\r
-                    d_data.tags['disk_domain_id'] = str(dev_name)\r
-                    d_data.tags['disk_wwn'] = str(dev_name)\r
-                    d_data.fields['serial_number'] = str(dev_name)\r
-                d_data.tags['primary_key'] = \\r
-                    str('%s%s%s'\r
-                        % (cluster_id, d_data.tags['host_domain_id'],\r
-                           d_data.tags['disk_domain_id']))\r
-                d_data.fields['disk_status'] = int(1)\r
-                is_ssd = s_val.get('rotation_rate') == 0\r
-                vendor = s_val.get('vendor', None)\r
-                model = s_val.get('model_name', None)\r
-                if s_val.get('sata_version', {}).get('string'):\r
-                    sata_version = s_val['sata_version']['string']\r
-                else:\r
-                    sata_version = ''\r
-                if s_val.get('device', {}).get('protocol'):\r
-                    protocol = s_val['device']['protocol']\r
-                else:\r
-                    protocol = ''\r
-                d_data.fields['disk_type'] = \\r
-                    self._convert_disk_type(is_ssd, sata_version, protocol)\r
-                d_data.fields['firmware_version'] = \\r
-                    str(s_val.get('firmware_version'))\r
-                if model:\r
-                    d_data.fields['model'] = str(model)\r
-                if vendor:\r
-                    d_data.fields['vendor'] = str(vendor)\r
-                if sata_version:\r
-                    d_data.fields['sata_version'] = str(sata_version)\r
-                if s_val.get('logical_block_size'):\r
-                    d_data.fields['sector_size'] = \\r
-                        str(s_val['logical_block_size'])\r
-                d_data.fields['transport_protocol'] = str('')\r
-                d_data.fields['vendor'] = \\r
-                    str(s_val.get('model_family', '')).replace('\"', '\'')\r
-                try:\r
-                    if isinstance(s_val.get('user_capacity'), dict):\r
-                        if isinstance(s_val['user_capacity'].get('bytes'), dict):\r
-                            user_capacity = \\r
-                                s_val['user_capacity'].get('bytes', {}).get('n', 0)\r
-                        else:\r
-                            user_capacity = s_val['user_capacity'].get('bytes')\r
-                    else:\r
-                        user_capacity = s_val.get('user_capacity', 0)\r
-                except ValueError:\r
-                    user_capacity = 0\r
-                if str(user_capacity).isdigit():\r
-                    d_data.fields['size'] = get_human_readable(int(user_capacity), 0)\r
-                else:\r
-                    d_data.fields['size'] = str(user_capacity)\r
-                if s_val.get('smart_status', {}).get('passed'):\r
-                    d_data.fields['smart_health_status'] = 'PASSED'\r
-                else:\r
-                    d_data.fields['smart_health_status'] = 'FAILED'\r
-                self.data.append(d_data)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_disk_smart.py
deleted file mode 100644 (file)
index 1ad3478..0000000
+++ /dev/null
@@ -1,182 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import datetime\r
-import json\r
-# imported for its side effect: avoids a lazy-import race when strptime\r
-# is first called from a worker thread\r
-import _strptime\r
-import socket\r
-import time\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIDiskSmartFields(MetricsField):\r
-    """ SAI DiskSmart structure """\r
-    measurement = 'sai_disk_smart'\r
-\r
-    def __init__(self):\r
-        super(SAIDiskSmartFields, self).__init__()\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.tags['disk_domain_id'] = None\r
-        self.tags['disk_name'] = None\r
-        self.tags['disk_wwn'] = None\r
-        self.tags['primary_key'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['host_domain_id'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-\r
-class SAIDiskSmartAgent(MetricsAgent):\r
-    measurement = 'sai_disk_smart'\r
-\r
-    def _collect_data(self):\r
-        # process data and save to 'self.data'\r
-        obj_api = ClusterAPI(self._module_inst)\r
-        cluster_id = obj_api.get_cluster_id()\r
-        osds = obj_api.get_osds()\r
-        for osd in osds:\r
-            if osd.get('osd') is None:\r
-                continue\r
-            if not osd.get('in'):\r
-                continue\r
-            osds_meta = obj_api.get_osd_metadata(osd.get('osd'))\r
-            if not osds_meta:\r
-                continue\r
-            devs_info = obj_api.get_osd_device_id(osd.get('osd'))\r
-            if devs_info:\r
-                for dev_name, dev_info in devs_info.items():\r
-                    osds_smart = obj_api.get_device_health(dev_info['dev_id'])\r
-                    if not osds_smart:\r
-                        continue\r
-                    # Always pass through last smart data record\r
-                    o_key = sorted(osds_smart.keys(), reverse=True)[0]\r
-                    if o_key:\r
-                        s_date = o_key\r
-                        s_val = osds_smart[s_date]\r
-                        smart_data = SAIDiskSmartFields()\r
-                        smart_data.tags['disk_name'] = str(dev_name)\r
-                        smart_data.fields['cluster_domain_id'] = str(cluster_id)\r
-                        smart_data.tags['host_domain_id'] = \\r
-                            str('%s_%s'\r
-                                % (cluster_id, osds_meta.get('hostname', 'None')))\r
-                        smart_data.fields['agenthost'] = str(socket.gethostname())\r
-                        smart_data.tags['agenthost_domain_id'] = cluster_id\r
-                        # parse attributes\r
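-                        # NVMe devices expose a flat health log; ATA/SCSI\r
-                        # devices expose per-attribute SMART tables instead\r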
-                        protocol = s_val.get('device', {}).get('protocol', '')\r
-                        if str(protocol).lower() == 'nvme':\r
-                            nvme_info = s_val.get('nvme_smart_health_information_log', {})\r
-                            smart_data.fields['CriticalWarning_raw'] = int(nvme_info.get('critical_warning', 0))\r
-                            smart_data.fields['CurrentDriveTemperature_raw'] = int(nvme_info.get('temperature', 0))\r
-                            smart_data.fields['AvailableSpare_raw'] = int(nvme_info.get('available_spare', 0))\r
-                            smart_data.fields['AvailableSpareThreshold_raw'] = int(nvme_info.get('available_spare_threshold', 0))\r
-                            smart_data.fields['PercentageUsed_raw'] = int(nvme_info.get('percentage_used', 0))\r
-                            smart_data.fields['DataUnitsRead_raw'] = int(nvme_info.get('data_units_read', 0))\r
-                            smart_data.fields['DataUnitsWritten_raw'] = int(nvme_info.get('data_units_written', 0))\r
-                            smart_data.fields['HostReadCommands_raw'] = int(nvme_info.get('host_reads', 0))\r
-                            smart_data.fields['HostWriteCommands_raw'] = int(nvme_info.get('host_writes', 0))\r
-                            smart_data.fields['ControllerBusyTime_raw'] = int(nvme_info.get('controller_busy_time', 0))\r
-                            smart_data.fields['PowerCycles_raw'] = int(nvme_info.get('power_cycles', 0))\r
-                            smart_data.fields['PowerOnHours_raw'] = int(nvme_info.get('power_on_hours', 0))\r
-                            smart_data.fields['UnsafeShutdowns_raw'] = int(nvme_info.get('unsafe_shutdowns', 0))\r
-                            smart_data.fields['MediaandDataIntegrityErrors_raw'] = int(nvme_info.get('media_errors', 0))\r
-                            smart_data.fields['ErrorInformationLogEntries'] = int(nvme_info.get('num_err_log_entries', 0))\r
-                            nvme_addition = s_val.get('nvme_smart_health_information_add_log', {})\r
-                            for k, v in nvme_addition.get("Device stats", {}).items():\r
-                                if v.get('raw') is None:\r
-                                    continue\r
-                                if isinstance(v.get('raw'), int):\r
-                                    smart_data.fields[k] = int(v['raw'])\r
-                                else:\r
-                                    smart_data.fields[k] = str(v.get('raw'))\r
-                        else:\r
-                            ata_smart = s_val.get('ata_smart_attributes', {})\r
-                            for attr in ata_smart.get('table', []):\r
-                                if attr.get('raw', {}).get('string'):\r
-                                    if str(attr.get('raw', {}).get('string', '0')).isdigit():\r
-                                        smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                            int(attr.get('raw', {}).get('string', '0'))\r
-                                    else:\r
-                                        if str(attr.get('raw', {}).get('string', '0')).split(' ')[0].isdigit():\r
-                                            smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                                int(attr.get('raw', {}).get('string', '0').split(' ')[0])\r
-                                        else:\r
-                                            smart_data.fields['%s_raw' % attr.get('id')] = \\r
-                                                attr.get('raw', {}).get('value', 0)\r
-                            smart_data.fields['raw_data'] = str(json.dumps(osds_smart[s_date]).replace("\"", "\'"))\r
-                            if s_val.get('temperature', {}).get('current') is not None:\r
-                                smart_data.fields['CurrentDriveTemperature_raw'] = \\r
-                                    int(s_val['temperature']['current'])\r
-                            if s_val.get('temperature', {}).get('drive_trip') is not None:\r
-                                smart_data.fields['DriveTripTemperature_raw'] = \\r
-                                    int(s_val['temperature']['drive_trip'])\r
-                            if s_val.get('elements_grown_list') is not None:\r
-                                smart_data.fields['ElementsInGrownDefectList_raw'] = int(s_val['elements_grown_list'])\r
-                            if s_val.get('power_on_time', {}).get('hours') is not None:\r
-                                smart_data.fields['9_raw'] = int(s_val['power_on_time']['hours'])\r
-                            if s_val.get('scsi_percentage_used_endurance_indicator') is not None:\r
-                                smart_data.fields['PercentageUsedEnduranceIndicator_raw'] = \\r
-                                    int(s_val['scsi_percentage_used_endurance_indicator'])\r
-                            if s_val.get('scsi_error_counter_log') is not None:\r
-                                s_err_counter = s_val['scsi_error_counter_log']\r
-                                for s_key in s_err_counter.keys():\r
-                                    if s_key.lower() in ['read', 'write']:\r
-                                        for s1_key in s_err_counter[s_key].keys():\r
-                                            if s1_key.lower() == 'errors_corrected_by_eccfast':\r
-                                                smart_data.fields['ErrorsCorrectedbyECCFast%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['errors_corrected_by_eccfast'])\r
-                                            elif s1_key.lower() == 'errors_corrected_by_eccdelayed':\r
-                                                smart_data.fields['ErrorsCorrectedbyECCDelayed%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['errors_corrected_by_eccdelayed'])\r
-                                            elif s1_key.lower() == 'errors_corrected_by_rereads_rewrites':\r
-                                                smart_data.fields['ErrorCorrectedByRereadsRewrites%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['errors_corrected_by_rereads_rewrites'])\r
-                                            elif s1_key.lower() == 'total_errors_corrected':\r
-                                                smart_data.fields['TotalErrorsCorrected%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['total_errors_corrected'])\r
-                                            elif s1_key.lower() == 'correction_algorithm_invocations':\r
-                                                smart_data.fields['CorrectionAlgorithmInvocations%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['correction_algorithm_invocations'])\r
-                                            elif s1_key.lower() == 'gigabytes_processed':\r
-                                                smart_data.fields['GigaBytesProcessed%s_raw' % s_key.capitalize()] = \\r
-                                                    float(s_err_counter[s_key]['gigabytes_processed'])\r
-                                            elif s1_key.lower() == 'total_uncorrected_errors':\r
-                                                smart_data.fields['TotalUncorrectedErrors%s_raw' % s_key.capitalize()] = \\r
-                                                    int(s_err_counter[s_key]['total_uncorrected_errors'])\r
-\r
-                        serial_number = s_val.get('serial_number')\r
-                        wwn = s_val.get('wwn', {})\r
-                        wwpn = ''\r
-                        if wwn:\r
-                            wwpn = '%06X%X' % (wwn.get('oui', 0), wwn.get('id', 0))\r
-                            for k in wwn.keys():\r
-                                if k in ['naa', 't10', 'eui', 'iqn']:\r
-                                    wwpn = ('%X%s' % (wwn[k], wwpn)).lower()\r
-                                    break\r
-                        if wwpn:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.tags['disk_wwn'] = str(wwpn)\r
-                            if serial_number:\r
-                                smart_data.fields['serial_number'] = str(serial_number)\r
-                            else:\r
-                                smart_data.fields['serial_number'] = str(wwpn)\r
-                        elif serial_number:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.fields['serial_number'] = str(serial_number)\r
-                            if wwpn:\r
-                                smart_data.tags['disk_wwn'] = str(wwpn)\r
-                            else:\r
-                                smart_data.tags['disk_wwn'] = str(serial_number)\r
-                        else:\r
-                            smart_data.tags['disk_domain_id'] = str(dev_info['dev_id'])\r
-                            smart_data.tags['disk_wwn'] = str(dev_name)\r
-                            smart_data.fields['serial_number'] = str(dev_name)\r
-                        smart_data.tags['primary_key'] = \\r
-                            str('%s%s%s'\r
-                                % (cluster_id,\r
-                                   smart_data.tags['host_domain_id'],\r
-                                   smart_data.tags['disk_domain_id']))\r
-                        smart_data.timestamp = \\r
-                            time.mktime(datetime.datetime.strptime(\r
-                                s_date, '%Y%m%d-%H%M%S').timetuple())\r
-                        self.data.append(smart_data)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py b/src/pybind/mgr/diskprediction_cloud/agent/metrics/sai_host.py
deleted file mode 100644 (file)
index 0f3698a..0000000
+++ /dev/null
@@ -1,105 +0,0 @@
-from __future__ import absolute_import\r
-\r
-import socket\r
-\r
-from . import AGENT_VERSION, MetricsAgent, MetricsField\r
-from ...common.clusterdata import ClusterAPI\r
-\r
-\r
-class SAIHostFields(MetricsField):\r
-    """ SAI Host structure """\r
-    measurement = 'sai_host'\r
-\r
-    def __init__(self):\r
-        super(SAIHostFields, self).__init__()\r
-        self.tags['domain_id'] = None\r
-        self.fields['agenthost'] = None\r
-        self.tags['agenthost_domain_id'] = None\r
-        self.fields['cluster_domain_id'] = None\r
-        self.fields['name'] = None\r
-        self.fields['host_ip'] = None\r
-        self.fields['host_ipv6'] = None\r
-        self.fields['host_uuid'] = None\r
-        self.fields['os_type'] = str('ceph')\r
-        self.fields['os_name'] = None\r
-        self.fields['os_version'] = None\r
-        self.fields['agent_version'] = str(AGENT_VERSION)\r
-\r
-\r
-class SAIHostAgent(MetricsAgent):\r
-    measurement = 'sai_host'\r
-\r
-    def _collect_data(self):\r
-        db = ClusterAPI(self._module_inst)\r
-        cluster_id = db.get_cluster_id()\r
-\r
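-        # hostnames already reported, so each host is emitted only once\r
-        # across the osd/mon/mds passes below\r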
-        hosts = set()\r
-\r
-        # Parse osd's host\r
-        osd_data = db.get_osds()\r
-        for _data in osd_data:\r
-            osd_id = _data['osd']\r
-            if not _data.get('in'):\r
-                continue\r
-            osd_addr = _data['public_addr'].split(':')[0]\r
-            osd_metadata = db.get_osd_metadata(osd_id)\r
-            if osd_metadata:\r
-                osd_host = osd_metadata.get('hostname', 'None')\r
-                if osd_host not in hosts:\r
-                    data = SAIHostFields()\r
-                    data.fields['agenthost'] = str(socket.gethostname())\r
-                    data.tags['agenthost_domain_id'] = cluster_id\r
-                    data.tags['domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, osd_host))\r
-                    data.fields['cluster_domain_id'] = str(cluster_id)\r
-                    data.fields['host_ip'] = osd_addr\r
-                    data.fields['host_uuid'] = \\r
-                        str('%s_%s' % (cluster_id, osd_host))\r
-                    data.fields['os_name'] = \\r
-                        osd_metadata.get('ceph_release', '')\r
-                    data.fields['os_version'] = \\r
-                        osd_metadata.get('ceph_version_short', '')\r
-                    data.fields['name'] = 'osd_{}'.format(osd_host)\r
-                    hosts.add(osd_host)\r
-                    self.data.append(data)\r
-\r
-        # Parse mon node host\r
-        mons = db.get_mons()\r
-        for _data in mons:\r
-            mon_host = _data['name']\r
-            mon_addr = _data['public_addr'].split(':')[0]\r
-            if mon_host not in hosts:\r
-                data = SAIHostFields()\r
-                data.fields['agenthost'] = str(socket.gethostname())\r
-                data.tags['agenthost_domain_id'] = cluster_id\r
-                data.tags['domain_id'] = \\r
-                    str('%s_%s' % (cluster_id, mon_host))\r
-                data.fields['cluster_domain_id'] = str(cluster_id)\r
-                data.fields['host_ip'] = mon_addr\r
-                data.fields['host_uuid'] = \\r
-                    str('%s_%s' % (cluster_id, mon_host))\r
-                data.fields['name'] = 'mon_{}'.format(mon_host)\r
-                hosts.add(mon_host)\r
-                self.data.append(data)\r
-\r
-        # Parse fs host\r
-        file_systems = db.get_file_systems()\r
-        for _data in file_systems:\r
-            mds_info = _data.get('mdsmap', {}).get('info', {})\r
-            for _gid in mds_info:\r
-                mds_data = mds_info[_gid]\r
-                mds_addr = mds_data.get('addr').split(':')[0]\r
-                mds_host = mds_data.get('name')\r
-                if mds_host not in hosts:\r
-                    data = SAIHostFields()\r
-                    data.fields['agenthost'] = str(socket.gethostname())\r
-                    data.tags['agenthost_domain_id'] = cluster_id\r
-                    data.tags['domain_id'] = \\r
-                        str('%s_%s' % (cluster_id, mds_host))\r
-                    data.fields['cluster_domain_id'] = str(cluster_id)\r
-                    data.fields['host_ip'] = mds_addr\r
-                    data.fields['host_uuid'] = \\r
-                        str('%s_%s' % (cluster_id, mds_host))\r
-                    data.fields['name'] = 'mds_{}'.format(mds_host)\r
-                    hosts.add(mds_host)\r
-                    self.data.append(data)\r
diff --git a/src/pybind/mgr/diskprediction_cloud/agent/predictor.py b/src/pybind/mgr/diskprediction_cloud/agent/predictor.py
deleted file mode 100644 (file)
index 1fdea46..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-from __future__ import absolute_import
-
-
-class PredictAgent(object):
-
-    measurement = 'predictor'
-
-    def __init__(self, mgr_module, obj_sender, timeout=30):
-        self.data = []
-        self._client = None
-        self._client = obj_sender
-        self._logger = mgr_module.log
-        self._module_inst = mgr_module
-        self._timeout = timeout
-
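-    # Python 2 truth hook: the agent is truthy only when bound to a mgr
-    # module instance (Python 3 would look for __bool__ instead)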
-    def __nonzero__(self):
-        return bool(self._module_inst)
-
-    def run(self):
-        result = self._module_inst.get('devices')
-        cluster_id = self._module_inst.get('mon_map').get('fsid')
-        if not result:
-            return -1, '', 'unable to get all devices for prediction'
-        for dev in result.get('devices', []):
-            for location in dev.get('location', []):
-                host = location.get('host')
-                host_domain_id = '{}_{}'.format(cluster_id, host)
-                prediction_data = self._get_cloud_prediction_result(host_domain_id, dev.get('devid'))
-                if prediction_data:
-                    self._module_inst.prediction_result[dev.get('devid')] = prediction_data
-
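-    # fetch the latest 'sai_disk_prediction' record for one disk from the
-    # cloud service; returns {} on any failure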
-    def _get_cloud_prediction_result(self, host_domain_id, disk_domain_id):
-        result = {}
-        try:
-            query_info = self._client.query_info(host_domain_id, disk_domain_id, 'sai_disk_prediction')
-            status_code = query_info.status_code
-            if status_code == 200:
-                result = query_info.json()
-            else:
-                resp = query_info.json()
-                if resp.get('error'):
-                    self._logger.error(str(resp['error']))
-        except Exception as e:
-            self._logger.error('failed to get %s prediction result: %s' % (disk_domain_id, str(e)))
-        return result
diff --git a/src/pybind/mgr/diskprediction_cloud/common/__init__.py b/src/pybind/mgr/diskprediction_cloud/common/__init__.py
deleted file mode 100644 (file)
index be40941..0000000
+++ /dev/null
@@ -1,59 +0,0 @@
-from __future__ import absolute_import\r
-import errno\r
-from functools import wraps\r
-import os\r
-import signal\r
-\r
-\r
-DP_MGR_STAT_OK = 'OK'\r
-DP_MGR_STAT_WARNING = 'WARNING'\r
-DP_MGR_STAT_FAILED = 'FAILED'\r
-DP_MGR_STAT_DISABLED = 'DISABLED'\r
-DP_MGR_STAT_ENABLED = 'ENABLED'\r
-\r
-\r
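-# minimal stand-in for a requests-style response object, exposing the\r
-# json()/content/status_code interface callers expect\r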
-class DummyResponse:\r
-    def __init__(self):\r
-        self.resp_json = dict()\r
-        self.content = 'DummyResponse'\r
-        self.status_code = 404\r
-\r
-    def json(self):\r
-        return self.resp_json\r
-\r
-    def __str__(self):\r
-        return '{}'.format({'resp': self.resp_json, 'content': self.content, 'status_code': self.status_code})\r
-\r
-\r
-class TimeoutError(Exception):\r
-    def __init__(self):\r
-        super(TimeoutError, self).__init__("Timer expired")\r
-\r
-\r
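-# decorator: abort func(self) with the module-local TimeoutError after\r
-# getattr(self, '_timeout', 10) seconds, via SIGALRM (Unix main thread only)\r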
-def timeout(func):\r
-    DEFAULT_TIMEOUT = 10\r
-\r
-    def _handle_timeout(signum, frame):\r
-        raise TimeoutError()\r
-\r
-    @wraps(func)\r
-    def wrapper(self):\r
-        signal.signal(signal.SIGALRM, _handle_timeout)\r
-        signal.alarm(getattr(self, '_timeout', DEFAULT_TIMEOUT))\r
-        try:\r
-            return func(self)\r
-        finally:\r
-            signal.alarm(0)\r
-\r
-    return wrapper\r
-\r
-\r
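-# format a byte count with decimal (1000-based) units, e.g. 20000000000 -> '20 GB'\r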
-def get_human_readable(size, precision=2):\r
-    suffixes = ['B', 'KB', 'MB', 'GB', 'TB']\r
-    suffix_index = 0\r
-    while size > 1000 and suffix_index < 4:\r
-        # increment the index of the suffix\r
-        suffix_index += 1\r
-        # apply the division\r
-        size = size/1000.0\r
-    return '%.*f %s' % (precision, size, suffixes[suffix_index])\r
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2.py
deleted file mode 100644 (file)
index 9f65c73..0000000
+++ /dev/null
@@ -1,1775 +0,0 @@
-# Generated by the protocol buffer compiler.  DO NOT EDIT!
-# source: mainServer.proto
-
-import sys
-_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
-from google.protobuf import descriptor as _descriptor
-from google.protobuf import message as _message
-from google.protobuf import reflection as _reflection
-from google.protobuf import symbol_database as _symbol_database
-from google.protobuf import descriptor_pb2
-# @@protoc_insertion_point(imports)
-
-_sym_db = _symbol_database.Default()
-
-
-from google.api import annotations_pb2 as google_dot_api_dot_annotations__pb2
-
-
-DESCRIPTOR = _descriptor.FileDescriptor(
-  name='mainServer.proto',
-  package='proto',
-  syntax='proto3',
-  serialized_pb=_b('\n\x10mainServer.proto\x12\x05proto\x1a\x1cgoogle/api/annotations.proto\"\x07\n\x05\x45mpty\"#\n\x10GeneralMsgOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\")\n\x16GeneralHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\x1d\n\nPingOutout\x12\x0f\n\x07message\x18\x01 \x01(\t\"*\n\tTestInput\x12\x1d\n\x06people\x18\x01 \x03(\x0b\x32\r.proto.Person\"\xbe\x01\n\nTestOutput\x12\x10\n\x08strArray\x18\x01 \x03(\t\x12\x31\n\x08mapValue\x18\x02 \x03(\x0b\x32\x1f.proto.TestOutput.MapValueEntry\x12\x19\n\x02pn\x18\x04 \x01(\x0b\x32\r.proto.Person\x12\x1f\n\x07profile\x18\x03 \x03(\x0b\x32\x0e.proto.Profile\x1a/\n\rMapValueEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xcf\x01\n\x06Person\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\x03\x12\r\n\x05\x65mail\x18\x03 \x01(\t\x12)\n\x06phones\x18\x04 \x03(\x0b\x32\x19.proto.Person.PhoneNumber\x1a\x44\n\x0bPhoneNumber\x12\x0e\n\x06number\x18\x01 \x01(\t\x12%\n\x04type\x18\x02 \x01(\x0e\x32\x17.proto.Person.PhoneType\"+\n\tPhoneType\x12\n\n\x06MOBILE\x10\x00\x12\x08\n\x04HOME\x10\x01\x12\x08\n\x04WORK\x10\x02\"\xa9\x01\n\x07Profile\x12%\n\x08\x66ileInfo\x18\x01 \x01(\x0b\x32\x13.proto.Profile.File\x1aw\n\x04\x46ile\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x11\n\ttypeInt32\x18\x02 \x01(\x05\x12\x11\n\ttypeInt64\x18\x03 \x01(\x03\x12\x11\n\ttypeFloat\x18\x04 \x01(\x02\x12\x12\n\ntypeDouble\x18\x05 \x01(\x01\x12\x14\n\x0c\x62ooleanValue\x18\x06 \x01(\x08\"4\n\x15GetUsersByStatusInput\x12\x0e\n\x06status\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\":\n\x16GetUsersByStatusOutput\x12 \n\x05users\x18\x01 \x03(\x0b\x32\x11.proto.UserOutput\")\n\x16\x41\x63\x63ountHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\nLoginInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x10\n\x08password\x18\x02 \x01(\t\"\xf2\x01\n\nUserOutput\x12\n\n\x02id\x18\x01 \x01(\t\x12\r\n\x05\x65mail\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\x12\r\n\x05phone\x18\x04 \x01(\t\x12\x11\n\tfirstName\x18\x05 \x01(\t\x12\x10\n\x08lastName\x18\x06 \x01(\t\x12\x13\n\x0b\x63reatedTime\x18\x07 \x01(\t\x12\x11\n\tnamespace\x18\x08 \x01(\t\x12\x12\n\ndomainName\x18\t \x01(\t\x12\x0f\n\x07\x63ompany\x18\n \x01(\t\x12\x0b\n\x03url\x18\x0b \x01(\t\x12\x14\n\x0c\x61gentAccount\x18\x0c \x01(\t\x12\x15\n\ragentPassword\x18\r \x01(\t\"s\n\x0bSingupInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\r\n\x05phone\x18\x02 \x01(\t\x12\x11\n\tfirstName\x18\x03 \x01(\t\x12\x10\n\x08lastName\x18\x04 \x01(\t\x12\x10\n\x08password\x18\x05 \x01(\t\x12\x0f\n\x07\x63ompany\x18\x06 \x01(\t\"\x1f\n\x0cSingupOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"-\n\x0f\x44\x65leteUserInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"C\n\x15UpdateUserStatusInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t\"\'\n\x16ResendConfirmCodeInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\"+\n\x0c\x43onfirmInput\x12\r\n\x05\x65mail\x18\x01 \x01(\t\x12\x0c\n\x04\x63ode\x18\x02 \x01(\t\"$\n\x11\x44PHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"n\n\x17\x44PGetPhysicalDisksInput\x12\x0f\n\x07hostIds\x18\x01 \x01(\t\x12\x0b\n\x03ids\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"{\n\x19\x44PGetDisksPredictionInput\x12\x17\n\x0fphysicalDiskIds\x18\x01 \x01(\t\x12\x0e\n\x06status\x18\x02 \x01(\t\x12\r\n\x05limit\x18\x03 \x01(\x03\x12\x0c\n\x04page\x18\x04 \x01(\x03\x12\x0c\n\x04\x66rom\x18\x05 \x01(\t\x12\n\n\x02to\x18\x06 \x01(\t\"\x1e\n\x0e\x44PBinaryOutput\x12\x0c\n\x04\x64\x61ta\x18\x01 \x01(\x0c\",\n\x19\x43ollectionHeartbeatOutput\x12\x0f\n\x07message\x18\x01 \x01(\t\"\"\n\x10PostMetricsInput\x12\x0e\n\x06points\x18\x01 \x03(\t\" \n\x10PostDBRelayInput\x12\x0c\n\x04\x63mds\x18\x01 \x03(\t\":\n\x17\x43ollectionMessageOutput\x12\x0e\n\x06status\x18\x01 \x01(\x03\x12\x0f\n\x07message\x18\x02 \x01(\t2\x85\x02\n\x07General\x12\x63\n\x10GeneralHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.GeneralHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/general/heartbeat\x12\x46\n\x04Ping\x12\x0c.proto.Empty\x1a\x11.proto.PingOutout\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/general/ping\x12M\n\x04Test\x12\x10.proto.TestInput\x1a\x11.proto.TestOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/general/test:\x01*2\xa4\x06\n\x07\x41\x63\x63ount\x12\x63\n\x10\x41\x63\x63ountHeartbeat\x12\x0c.proto.Empty\x1a\x1d.proto.AccountHeartbeatOutput\"\"\x82\xd3\xe4\x93\x02\x1c\x12\x1a/apis/v2/account/heartbeat\x12N\n\x05Login\x12\x11.proto.LoginInput\x1a\x11.proto.UserOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\"\x14/apis/v2/users/login:\x01*\x12S\n\x06Signup\x12\x12.proto.SingupInput\x1a\x13.proto.SingupOutput\" \x82\xd3\xe4\x93\x02\x1a\"\x15/apis/v2/users/signup:\x01*\x12r\n\x11ResendConfirmCode\x12\x1d.proto.ResendConfirmCodeInput\x1a\x17.proto.GeneralMsgOutput\"%\x82\xd3\xe4\x93\x02\x1f\"\x1a/apis/v2/users/confirmcode:\x01*\x12_\n\x07\x43onfirm\x12\x13.proto.ConfirmInput\x1a\x17.proto.GeneralMsgOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/users/confirmation:\x01*\x12g\n\x10GetUsersByStatus\x12\x1c.proto.GetUsersByStatusInput\x1a\x1d.proto.GetUsersByStatusOutput\"\x16\x82\xd3\xe4\x93\x02\x10\x12\x0e/apis/v2/users\x12\x63\n\nDeleteUser\x12\x16.proto.DeleteUserInput\x1a\x17.proto.GeneralMsgOutput\"$\x82\xd3\xe4\x93\x02\x1e*\x1c/apis/v2/users/{email}/{key}\x12l\n\x10UpdateUserStatus\x12\x1c.proto.UpdateUserStatusInput\x1a\x17.proto.GeneralMsgOutput\"!\x82\xd3\xe4\x93\x02\x1b\x1a\x16/apis/v2/users/{email}:\x01*2\xcf\x02\n\x0b\x44iskprophet\x12T\n\x0b\x44PHeartbeat\x12\x0c.proto.Empty\x1a\x18.proto.DPHeartbeatOutput\"\x1d\x82\xd3\xe4\x93\x02\x17\x12\x15/apis/v2/dp/heartbeat\x12l\n\x12\x44PGetPhysicalDisks\x12\x1e.proto.DPGetPhysicalDisksInput\x1a\x15.proto.DPBinaryOutput\"\x1f\x82\xd3\xe4\x93\x02\x19\x12\x17/apis/v2/physical-disks\x12|\n\x14\x44PGetDisksPrediction\x12 .proto.DPGetDisksPredictionInput\x1a\x15.proto.DPBinaryOutput\"+\x82\xd3\xe4\x93\x02%\x12#/apis/v2/physical-disks/predictions2\xdb\x02\n\nCollection\x12l\n\x13\x43ollectionHeartbeat\x12\x0c.proto.Empty\x1a .proto.CollectionHeartbeatOutput\"%\x82\xd3\xe4\x93\x02\x1f\x12\x1d/apis/v2/collection/heartbeat\x12o\n\x0bPostDBRelay\x12\x17.proto.PostDBRelayInput\x1a\x1e.proto.CollectionMessageOutput\"\'\x82\xd3\xe4\x93\x02!\"\x1c/apis/v2/collection/relation:\x01*\x12n\n\x0bPostMetrics\x12\x17.proto.PostMetricsInput\x1a\x1e.proto.CollectionMessageOutput\"&\x82\xd3\xe4\x93\x02 \"\x1b/apis/v2/collection/metrics:\x01*b\x06proto3')
-  ,
-  dependencies=[google_dot_api_dot_annotations__pb2.DESCRIPTOR,])
-
-
-
-_PERSON_PHONETYPE = _descriptor.EnumDescriptor(
-  name='PhoneType',
-  full_name='proto.Person.PhoneType',
-  filename=None,
-  file=DESCRIPTOR,
-  values=[
-    _descriptor.EnumValueDescriptor(
-      name='MOBILE', index=0, number=0,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='HOME', index=1, number=1,
-      options=None,
-      type=None),
-    _descriptor.EnumValueDescriptor(
-      name='WORK', index=2, number=2,
-      options=None,
-      type=None),
-  ],
-  containing_type=None,
-  options=None,
-  serialized_start=579,
-  serialized_end=622,
-)
-_sym_db.RegisterEnumDescriptor(_PERSON_PHONETYPE)
-
-
-_EMPTY = _descriptor.Descriptor(
-  name='Empty',
-  full_name='proto.Empty',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=57,
-  serialized_end=64,
-)
-
-
-_GENERALMSGOUTPUT = _descriptor.Descriptor(
-  name='GeneralMsgOutput',
-  full_name='proto.GeneralMsgOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.GeneralMsgOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=66,
-  serialized_end=101,
-)
-
-
-_GENERALHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='GeneralHeartbeatOutput',
-  full_name='proto.GeneralHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.GeneralHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=103,
-  serialized_end=144,
-)
-
-
-_PINGOUTOUT = _descriptor.Descriptor(
-  name='PingOutout',
-  full_name='proto.PingOutout',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.PingOutout.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=146,
-  serialized_end=175,
-)
-
-
-_TESTINPUT = _descriptor.Descriptor(
-  name='TestInput',
-  full_name='proto.TestInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='people', full_name='proto.TestInput.people', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=177,
-  serialized_end=219,
-)
-
-
-_TESTOUTPUT_MAPVALUEENTRY = _descriptor.Descriptor(
-  name='MapValueEntry',
-  full_name='proto.TestOutput.MapValueEntry',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.TestOutput.MapValueEntry.key', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='value', full_name='proto.TestOutput.MapValueEntry.value', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=365,
-  serialized_end=412,
-)
-
-_TESTOUTPUT = _descriptor.Descriptor(
-  name='TestOutput',
-  full_name='proto.TestOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='strArray', full_name='proto.TestOutput.strArray', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='mapValue', full_name='proto.TestOutput.mapValue', index=1,
-      number=2, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='pn', full_name='proto.TestOutput.pn', index=2,
-      number=4, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='profile', full_name='proto.TestOutput.profile', index=3,
-      number=3, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_TESTOUTPUT_MAPVALUEENTRY, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=222,
-  serialized_end=412,
-)
-
-
-_PERSON_PHONENUMBER = _descriptor.Descriptor(
-  name='PhoneNumber',
-  full_name='proto.Person.PhoneNumber',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='number', full_name='proto.Person.PhoneNumber.number', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='type', full_name='proto.Person.PhoneNumber.type', index=1,
-      number=2, type=14, cpp_type=8, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=509,
-  serialized_end=577,
-)
-
-_PERSON = _descriptor.Descriptor(
-  name='Person',
-  full_name='proto.Person',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='proto.Person.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='id', full_name='proto.Person.id', index=1,
-      number=2, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.Person.email', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phones', full_name='proto.Person.phones', index=3,
-      number=4, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_PERSON_PHONENUMBER, ],
-  enum_types=[
-    _PERSON_PHONETYPE,
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=415,
-  serialized_end=622,
-)
-
-
-_PROFILE_FILE = _descriptor.Descriptor(
-  name='File',
-  full_name='proto.Profile.File',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='name', full_name='proto.Profile.File.name', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeInt32', full_name='proto.Profile.File.typeInt32', index=1,
-      number=2, type=5, cpp_type=1, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeInt64', full_name='proto.Profile.File.typeInt64', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeFloat', full_name='proto.Profile.File.typeFloat', index=3,
-      number=4, type=2, cpp_type=6, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='typeDouble', full_name='proto.Profile.File.typeDouble', index=4,
-      number=5, type=1, cpp_type=5, label=1,
-      has_default_value=False, default_value=float(0),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='booleanValue', full_name='proto.Profile.File.booleanValue', index=5,
-      number=6, type=8, cpp_type=7, label=1,
-      has_default_value=False, default_value=False,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=675,
-  serialized_end=794,
-)
-
-_PROFILE = _descriptor.Descriptor(
-  name='Profile',
-  full_name='proto.Profile',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='fileInfo', full_name='proto.Profile.fileInfo', index=0,
-      number=1, type=11, cpp_type=10, label=1,
-      has_default_value=False, default_value=None,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[_PROFILE_FILE, ],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=625,
-  serialized_end=794,
-)
-
-
-_GETUSERSBYSTATUSINPUT = _descriptor.Descriptor(
-  name='GetUsersByStatusInput',
-  full_name='proto.GetUsersByStatusInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.GetUsersByStatusInput.status', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.GetUsersByStatusInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=796,
-  serialized_end=848,
-)
-
-
-_GETUSERSBYSTATUSOUTPUT = _descriptor.Descriptor(
-  name='GetUsersByStatusOutput',
-  full_name='proto.GetUsersByStatusOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='users', full_name='proto.GetUsersByStatusOutput.users', index=0,
-      number=1, type=11, cpp_type=10, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=850,
-  serialized_end=908,
-)
-
-
-_ACCOUNTHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='AccountHeartbeatOutput',
-  full_name='proto.AccountHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.AccountHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=910,
-  serialized_end=951,
-)
-
-
-_LOGININPUT = _descriptor.Descriptor(
-  name='LoginInput',
-  full_name='proto.LoginInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.LoginInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='password', full_name='proto.LoginInput.password', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=953,
-  serialized_end=998,
-)
-
-
-_USEROUTPUT = _descriptor.Descriptor(
-  name='UserOutput',
-  full_name='proto.UserOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='id', full_name='proto.UserOutput.id', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.UserOutput.email', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.UserOutput.status', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phone', full_name='proto.UserOutput.phone', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='firstName', full_name='proto.UserOutput.firstName', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='lastName', full_name='proto.UserOutput.lastName', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='createdTime', full_name='proto.UserOutput.createdTime', index=6,
-      number=7, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='namespace', full_name='proto.UserOutput.namespace', index=7,
-      number=8, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='domainName', full_name='proto.UserOutput.domainName', index=8,
-      number=9, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='company', full_name='proto.UserOutput.company', index=9,
-      number=10, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='url', full_name='proto.UserOutput.url', index=10,
-      number=11, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='agentAccount', full_name='proto.UserOutput.agentAccount', index=11,
-      number=12, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='agentPassword', full_name='proto.UserOutput.agentPassword', index=12,
-      number=13, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1001,
-  serialized_end=1243,
-)
-
-
-_SINGUPINPUT = _descriptor.Descriptor(
-  name='SingupInput',
-  full_name='proto.SingupInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.SingupInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='phone', full_name='proto.SingupInput.phone', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='firstName', full_name='proto.SingupInput.firstName', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='lastName', full_name='proto.SingupInput.lastName', index=3,
-      number=4, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='password', full_name='proto.SingupInput.password', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='company', full_name='proto.SingupInput.company', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1245,
-  serialized_end=1360,
-)
-
-
-_SINGUPOUTPUT = _descriptor.Descriptor(
-  name='SingupOutput',
-  full_name='proto.SingupOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.SingupOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1362,
-  serialized_end=1393,
-)
-
-
-_DELETEUSERINPUT = _descriptor.Descriptor(
-  name='DeleteUserInput',
-  full_name='proto.DeleteUserInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.DeleteUserInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.DeleteUserInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1395,
-  serialized_end=1440,
-)
-
-
-_UPDATEUSERSTATUSINPUT = _descriptor.Descriptor(
-  name='UpdateUserStatusInput',
-  full_name='proto.UpdateUserStatusInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.UpdateUserStatusInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='key', full_name='proto.UpdateUserStatusInput.key', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.UpdateUserStatusInput.status', index=2,
-      number=3, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1442,
-  serialized_end=1509,
-)
-
-
-_RESENDCONFIRMCODEINPUT = _descriptor.Descriptor(
-  name='ResendConfirmCodeInput',
-  full_name='proto.ResendConfirmCodeInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.ResendConfirmCodeInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1511,
-  serialized_end=1550,
-)
-
-
-_CONFIRMINPUT = _descriptor.Descriptor(
-  name='ConfirmInput',
-  full_name='proto.ConfirmInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='email', full_name='proto.ConfirmInput.email', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='code', full_name='proto.ConfirmInput.code', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1552,
-  serialized_end=1595,
-)
-
-
-_DPHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='DPHeartbeatOutput',
-  full_name='proto.DPHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.DPHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1597,
-  serialized_end=1633,
-)
-
-
-_DPGETPHYSICALDISKSINPUT = _descriptor.Descriptor(
-  name='DPGetPhysicalDisksInput',
-  full_name='proto.DPGetPhysicalDisksInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='hostIds', full_name='proto.DPGetPhysicalDisksInput.hostIds', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='ids', full_name='proto.DPGetPhysicalDisksInput.ids', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='limit', full_name='proto.DPGetPhysicalDisksInput.limit', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='page', full_name='proto.DPGetPhysicalDisksInput.page', index=3,
-      number=4, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='from', full_name='proto.DPGetPhysicalDisksInput.from', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to', full_name='proto.DPGetPhysicalDisksInput.to', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1635,
-  serialized_end=1745,
-)
-
-
-_DPGETDISKSPREDICTIONINPUT = _descriptor.Descriptor(
-  name='DPGetDisksPredictionInput',
-  full_name='proto.DPGetDisksPredictionInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='physicalDiskIds', full_name='proto.DPGetDisksPredictionInput.physicalDiskIds', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.DPGetDisksPredictionInput.status', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='limit', full_name='proto.DPGetDisksPredictionInput.limit', index=2,
-      number=3, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='page', full_name='proto.DPGetDisksPredictionInput.page', index=3,
-      number=4, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='from', full_name='proto.DPGetDisksPredictionInput.from', index=4,
-      number=5, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='to', full_name='proto.DPGetDisksPredictionInput.to', index=5,
-      number=6, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1747,
-  serialized_end=1870,
-)
-
-
-_DPBINARYOUTPUT = _descriptor.Descriptor(
-  name='DPBinaryOutput',
-  full_name='proto.DPBinaryOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='data', full_name='proto.DPBinaryOutput.data', index=0,
-      number=1, type=12, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b(""),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1872,
-  serialized_end=1902,
-)
-
-
-_COLLECTIONHEARTBEATOUTPUT = _descriptor.Descriptor(
-  name='CollectionHeartbeatOutput',
-  full_name='proto.CollectionHeartbeatOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.CollectionHeartbeatOutput.message', index=0,
-      number=1, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1904,
-  serialized_end=1948,
-)
-
-
-_POSTMETRICSINPUT = _descriptor.Descriptor(
-  name='PostMetricsInput',
-  full_name='proto.PostMetricsInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='points', full_name='proto.PostMetricsInput.points', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1950,
-  serialized_end=1984,
-)
-
-
-_POSTDBRELAYINPUT = _descriptor.Descriptor(
-  name='PostDBRelayInput',
-  full_name='proto.PostDBRelayInput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='cmds', full_name='proto.PostDBRelayInput.cmds', index=0,
-      number=1, type=9, cpp_type=9, label=3,
-      has_default_value=False, default_value=[],
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=1986,
-  serialized_end=2018,
-)
-
-
-_COLLECTIONMESSAGEOUTPUT = _descriptor.Descriptor(
-  name='CollectionMessageOutput',
-  full_name='proto.CollectionMessageOutput',
-  filename=None,
-  file=DESCRIPTOR,
-  containing_type=None,
-  fields=[
-    _descriptor.FieldDescriptor(
-      name='status', full_name='proto.CollectionMessageOutput.status', index=0,
-      number=1, type=3, cpp_type=2, label=1,
-      has_default_value=False, default_value=0,
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-    _descriptor.FieldDescriptor(
-      name='message', full_name='proto.CollectionMessageOutput.message', index=1,
-      number=2, type=9, cpp_type=9, label=1,
-      has_default_value=False, default_value=_b("").decode('utf-8'),
-      message_type=None, enum_type=None, containing_type=None,
-      is_extension=False, extension_scope=None,
-      options=None, file=DESCRIPTOR),
-  ],
-  extensions=[
-  ],
-  nested_types=[],
-  enum_types=[
-  ],
-  options=None,
-  is_extendable=False,
-  syntax='proto3',
-  extension_ranges=[],
-  oneofs=[
-  ],
-  serialized_start=2020,
-  serialized_end=2078,
-)
-
-_TESTINPUT.fields_by_name['people'].message_type = _PERSON
-_TESTOUTPUT_MAPVALUEENTRY.containing_type = _TESTOUTPUT
-_TESTOUTPUT.fields_by_name['mapValue'].message_type = _TESTOUTPUT_MAPVALUEENTRY
-_TESTOUTPUT.fields_by_name['pn'].message_type = _PERSON
-_TESTOUTPUT.fields_by_name['profile'].message_type = _PROFILE
-_PERSON_PHONENUMBER.fields_by_name['type'].enum_type = _PERSON_PHONETYPE
-_PERSON_PHONENUMBER.containing_type = _PERSON
-_PERSON.fields_by_name['phones'].message_type = _PERSON_PHONENUMBER
-_PERSON_PHONETYPE.containing_type = _PERSON
-_PROFILE_FILE.containing_type = _PROFILE
-_PROFILE.fields_by_name['fileInfo'].message_type = _PROFILE_FILE
-_GETUSERSBYSTATUSOUTPUT.fields_by_name['users'].message_type = _USEROUTPUT
-DESCRIPTOR.message_types_by_name['Empty'] = _EMPTY
-DESCRIPTOR.message_types_by_name['GeneralMsgOutput'] = _GENERALMSGOUTPUT
-DESCRIPTOR.message_types_by_name['GeneralHeartbeatOutput'] = _GENERALHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['PingOutout'] = _PINGOUTOUT
-DESCRIPTOR.message_types_by_name['TestInput'] = _TESTINPUT
-DESCRIPTOR.message_types_by_name['TestOutput'] = _TESTOUTPUT
-DESCRIPTOR.message_types_by_name['Person'] = _PERSON
-DESCRIPTOR.message_types_by_name['Profile'] = _PROFILE
-DESCRIPTOR.message_types_by_name['GetUsersByStatusInput'] = _GETUSERSBYSTATUSINPUT
-DESCRIPTOR.message_types_by_name['GetUsersByStatusOutput'] = _GETUSERSBYSTATUSOUTPUT
-DESCRIPTOR.message_types_by_name['AccountHeartbeatOutput'] = _ACCOUNTHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['LoginInput'] = _LOGININPUT
-DESCRIPTOR.message_types_by_name['UserOutput'] = _USEROUTPUT
-DESCRIPTOR.message_types_by_name['SingupInput'] = _SINGUPINPUT
-DESCRIPTOR.message_types_by_name['SingupOutput'] = _SINGUPOUTPUT
-DESCRIPTOR.message_types_by_name['DeleteUserInput'] = _DELETEUSERINPUT
-DESCRIPTOR.message_types_by_name['UpdateUserStatusInput'] = _UPDATEUSERSTATUSINPUT
-DESCRIPTOR.message_types_by_name['ResendConfirmCodeInput'] = _RESENDCONFIRMCODEINPUT
-DESCRIPTOR.message_types_by_name['ConfirmInput'] = _CONFIRMINPUT
-DESCRIPTOR.message_types_by_name['DPHeartbeatOutput'] = _DPHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['DPGetPhysicalDisksInput'] = _DPGETPHYSICALDISKSINPUT
-DESCRIPTOR.message_types_by_name['DPGetDisksPredictionInput'] = _DPGETDISKSPREDICTIONINPUT
-DESCRIPTOR.message_types_by_name['DPBinaryOutput'] = _DPBINARYOUTPUT
-DESCRIPTOR.message_types_by_name['CollectionHeartbeatOutput'] = _COLLECTIONHEARTBEATOUTPUT
-DESCRIPTOR.message_types_by_name['PostMetricsInput'] = _POSTMETRICSINPUT
-DESCRIPTOR.message_types_by_name['PostDBRelayInput'] = _POSTDBRELAYINPUT
-DESCRIPTOR.message_types_by_name['CollectionMessageOutput'] = _COLLECTIONMESSAGEOUTPUT
-_sym_db.RegisterFileDescriptor(DESCRIPTOR)
-
-Empty = _reflection.GeneratedProtocolMessageType('Empty', (_message.Message,), dict(
-  DESCRIPTOR = _EMPTY,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Empty)
-  ))
-_sym_db.RegisterMessage(Empty)
-
-GeneralMsgOutput = _reflection.GeneratedProtocolMessageType('GeneralMsgOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GENERALMSGOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GeneralMsgOutput)
-  ))
-_sym_db.RegisterMessage(GeneralMsgOutput)
-
-GeneralHeartbeatOutput = _reflection.GeneratedProtocolMessageType('GeneralHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GENERALHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GeneralHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(GeneralHeartbeatOutput)
-
-PingOutout = _reflection.GeneratedProtocolMessageType('PingOutout', (_message.Message,), dict(
-  DESCRIPTOR = _PINGOUTOUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PingOutout)
-  ))
-_sym_db.RegisterMessage(PingOutout)
-
-TestInput = _reflection.GeneratedProtocolMessageType('TestInput', (_message.Message,), dict(
-  DESCRIPTOR = _TESTINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.TestInput)
-  ))
-_sym_db.RegisterMessage(TestInput)
-
-TestOutput = _reflection.GeneratedProtocolMessageType('TestOutput', (_message.Message,), dict(
-
-  MapValueEntry = _reflection.GeneratedProtocolMessageType('MapValueEntry', (_message.Message,), dict(
-    DESCRIPTOR = _TESTOUTPUT_MAPVALUEENTRY,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.TestOutput.MapValueEntry)
-    ))
-  ,
-  DESCRIPTOR = _TESTOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.TestOutput)
-  ))
-_sym_db.RegisterMessage(TestOutput)
-_sym_db.RegisterMessage(TestOutput.MapValueEntry)
-
-Person = _reflection.GeneratedProtocolMessageType('Person', (_message.Message,), dict(
-
-  PhoneNumber = _reflection.GeneratedProtocolMessageType('PhoneNumber', (_message.Message,), dict(
-    DESCRIPTOR = _PERSON_PHONENUMBER,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.Person.PhoneNumber)
-    ))
-  ,
-  DESCRIPTOR = _PERSON,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Person)
-  ))
-_sym_db.RegisterMessage(Person)
-_sym_db.RegisterMessage(Person.PhoneNumber)
-
-Profile = _reflection.GeneratedProtocolMessageType('Profile', (_message.Message,), dict(
-
-  File = _reflection.GeneratedProtocolMessageType('File', (_message.Message,), dict(
-    DESCRIPTOR = _PROFILE_FILE,
-    __module__ = 'mainServer_pb2'
-    # @@protoc_insertion_point(class_scope:proto.Profile.File)
-    ))
-  ,
-  DESCRIPTOR = _PROFILE,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.Profile)
-  ))
-_sym_db.RegisterMessage(Profile)
-_sym_db.RegisterMessage(Profile.File)
-
-GetUsersByStatusInput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusInput', (_message.Message,), dict(
-  DESCRIPTOR = _GETUSERSBYSTATUSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusInput)
-  ))
-_sym_db.RegisterMessage(GetUsersByStatusInput)
-
-GetUsersByStatusOutput = _reflection.GeneratedProtocolMessageType('GetUsersByStatusOutput', (_message.Message,), dict(
-  DESCRIPTOR = _GETUSERSBYSTATUSOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.GetUsersByStatusOutput)
-  ))
-_sym_db.RegisterMessage(GetUsersByStatusOutput)
-
-AccountHeartbeatOutput = _reflection.GeneratedProtocolMessageType('AccountHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _ACCOUNTHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.AccountHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(AccountHeartbeatOutput)
-
-LoginInput = _reflection.GeneratedProtocolMessageType('LoginInput', (_message.Message,), dict(
-  DESCRIPTOR = _LOGININPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.LoginInput)
-  ))
-_sym_db.RegisterMessage(LoginInput)
-
-UserOutput = _reflection.GeneratedProtocolMessageType('UserOutput', (_message.Message,), dict(
-  DESCRIPTOR = _USEROUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.UserOutput)
-  ))
-_sym_db.RegisterMessage(UserOutput)
-
-SingupInput = _reflection.GeneratedProtocolMessageType('SingupInput', (_message.Message,), dict(
-  DESCRIPTOR = _SINGUPINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.SingupInput)
-  ))
-_sym_db.RegisterMessage(SingupInput)
-
-SingupOutput = _reflection.GeneratedProtocolMessageType('SingupOutput', (_message.Message,), dict(
-  DESCRIPTOR = _SINGUPOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.SingupOutput)
-  ))
-_sym_db.RegisterMessage(SingupOutput)
-
-DeleteUserInput = _reflection.GeneratedProtocolMessageType('DeleteUserInput', (_message.Message,), dict(
-  DESCRIPTOR = _DELETEUSERINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DeleteUserInput)
-  ))
-_sym_db.RegisterMessage(DeleteUserInput)
-
-UpdateUserStatusInput = _reflection.GeneratedProtocolMessageType('UpdateUserStatusInput', (_message.Message,), dict(
-  DESCRIPTOR = _UPDATEUSERSTATUSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.UpdateUserStatusInput)
-  ))
-_sym_db.RegisterMessage(UpdateUserStatusInput)
-
-ResendConfirmCodeInput = _reflection.GeneratedProtocolMessageType('ResendConfirmCodeInput', (_message.Message,), dict(
-  DESCRIPTOR = _RESENDCONFIRMCODEINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.ResendConfirmCodeInput)
-  ))
-_sym_db.RegisterMessage(ResendConfirmCodeInput)
-
-ConfirmInput = _reflection.GeneratedProtocolMessageType('ConfirmInput', (_message.Message,), dict(
-  DESCRIPTOR = _CONFIRMINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.ConfirmInput)
-  ))
-_sym_db.RegisterMessage(ConfirmInput)
-
-DPHeartbeatOutput = _reflection.GeneratedProtocolMessageType('DPHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _DPHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(DPHeartbeatOutput)
-
-DPGetPhysicalDisksInput = _reflection.GeneratedProtocolMessageType('DPGetPhysicalDisksInput', (_message.Message,), dict(
-  DESCRIPTOR = _DPGETPHYSICALDISKSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPGetPhysicalDisksInput)
-  ))
-_sym_db.RegisterMessage(DPGetPhysicalDisksInput)
-
-DPGetDisksPredictionInput = _reflection.GeneratedProtocolMessageType('DPGetDisksPredictionInput', (_message.Message,), dict(
-  DESCRIPTOR = _DPGETDISKSPREDICTIONINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPGetDisksPredictionInput)
-  ))
-_sym_db.RegisterMessage(DPGetDisksPredictionInput)
-
-DPBinaryOutput = _reflection.GeneratedProtocolMessageType('DPBinaryOutput', (_message.Message,), dict(
-  DESCRIPTOR = _DPBINARYOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.DPBinaryOutput)
-  ))
-_sym_db.RegisterMessage(DPBinaryOutput)
-
-CollectionHeartbeatOutput = _reflection.GeneratedProtocolMessageType('CollectionHeartbeatOutput', (_message.Message,), dict(
-  DESCRIPTOR = _COLLECTIONHEARTBEATOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.CollectionHeartbeatOutput)
-  ))
-_sym_db.RegisterMessage(CollectionHeartbeatOutput)
-
-PostMetricsInput = _reflection.GeneratedProtocolMessageType('PostMetricsInput', (_message.Message,), dict(
-  DESCRIPTOR = _POSTMETRICSINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PostMetricsInput)
-  ))
-_sym_db.RegisterMessage(PostMetricsInput)
-
-PostDBRelayInput = _reflection.GeneratedProtocolMessageType('PostDBRelayInput', (_message.Message,), dict(
-  DESCRIPTOR = _POSTDBRELAYINPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.PostDBRelayInput)
-  ))
-_sym_db.RegisterMessage(PostDBRelayInput)
-
-CollectionMessageOutput = _reflection.GeneratedProtocolMessageType('CollectionMessageOutput', (_message.Message,), dict(
-  DESCRIPTOR = _COLLECTIONMESSAGEOUTPUT,
-  __module__ = 'mainServer_pb2'
-  # @@protoc_insertion_point(class_scope:proto.CollectionMessageOutput)
-  ))
-_sym_db.RegisterMessage(CollectionMessageOutput)
-
-
-_TESTOUTPUT_MAPVALUEENTRY.has_options = True
-_TESTOUTPUT_MAPVALUEENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
-
-_GENERAL = _descriptor.ServiceDescriptor(
-  name='General',
-  full_name='proto.General',
-  file=DESCRIPTOR,
-  index=0,
-  options=None,
-  serialized_start=2081,
-  serialized_end=2342,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='GeneralHeartbeat',
-    full_name='proto.General.GeneralHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_GENERALHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/general/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Ping',
-    full_name='proto.General.Ping',
-    index=1,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_PINGOUTOUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/general/ping')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Test',
-    full_name='proto.General.Test',
-    index=2,
-    containing_service=None,
-    input_type=_TESTINPUT,
-    output_type=_TESTOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/general/test:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_GENERAL)
-
-DESCRIPTOR.services_by_name['General'] = _GENERAL
-
-
-_ACCOUNT = _descriptor.ServiceDescriptor(
-  name='Account',
-  full_name='proto.Account',
-  file=DESCRIPTOR,
-  index=1,
-  options=None,
-  serialized_start=2345,
-  serialized_end=3149,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='AccountHeartbeat',
-    full_name='proto.Account.AccountHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_ACCOUNTHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\034\022\032/apis/v2/account/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Login',
-    full_name='proto.Account.Login',
-    index=1,
-    containing_service=None,
-    input_type=_LOGININPUT,
-    output_type=_USEROUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\"\024/apis/v2/users/login:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Signup',
-    full_name='proto.Account.Signup',
-    index=2,
-    containing_service=None,
-    input_type=_SINGUPINPUT,
-    output_type=_SINGUPOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\032\"\025/apis/v2/users/signup:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='ResendConfirmCode',
-    full_name='proto.Account.ResendConfirmCode',
-    index=3,
-    containing_service=None,
-    input_type=_RESENDCONFIRMCODEINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\"\032/apis/v2/users/confirmcode:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='Confirm',
-    full_name='proto.Account.Confirm',
-    index=4,
-    containing_service=None,
-    input_type=_CONFIRMINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/users/confirmation:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='GetUsersByStatus',
-    full_name='proto.Account.GetUsersByStatus',
-    index=5,
-    containing_service=None,
-    input_type=_GETUSERSBYSTATUSINPUT,
-    output_type=_GETUSERSBYSTATUSOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\020\022\016/apis/v2/users')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DeleteUser',
-    full_name='proto.Account.DeleteUser',
-    index=6,
-    containing_service=None,
-    input_type=_DELETEUSERINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\036*\034/apis/v2/users/{email}/{key}')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='UpdateUserStatus',
-    full_name='proto.Account.UpdateUserStatus',
-    index=7,
-    containing_service=None,
-    input_type=_UPDATEUSERSTATUSINPUT,
-    output_type=_GENERALMSGOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\033\032\026/apis/v2/users/{email}:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_ACCOUNT)
-
-DESCRIPTOR.services_by_name['Account'] = _ACCOUNT
-
-
-_DISKPROPHET = _descriptor.ServiceDescriptor(
-  name='Diskprophet',
-  full_name='proto.Diskprophet',
-  file=DESCRIPTOR,
-  index=2,
-  options=None,
-  serialized_start=3152,
-  serialized_end=3487,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='DPHeartbeat',
-    full_name='proto.Diskprophet.DPHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_DPHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\027\022\025/apis/v2/dp/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DPGetPhysicalDisks',
-    full_name='proto.Diskprophet.DPGetPhysicalDisks',
-    index=1,
-    containing_service=None,
-    input_type=_DPGETPHYSICALDISKSINPUT,
-    output_type=_DPBINARYOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\031\022\027/apis/v2/physical-disks')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='DPGetDisksPrediction',
-    full_name='proto.Diskprophet.DPGetDisksPrediction',
-    index=2,
-    containing_service=None,
-    input_type=_DPGETDISKSPREDICTIONINPUT,
-    output_type=_DPBINARYOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002%\022#/apis/v2/physical-disks/predictions')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_DISKPROPHET)
-
-DESCRIPTOR.services_by_name['Diskprophet'] = _DISKPROPHET
-
-
-_COLLECTION = _descriptor.ServiceDescriptor(
-  name='Collection',
-  full_name='proto.Collection',
-  file=DESCRIPTOR,
-  index=3,
-  options=None,
-  serialized_start=3490,
-  serialized_end=3837,
-  methods=[
-  _descriptor.MethodDescriptor(
-    name='CollectionHeartbeat',
-    full_name='proto.Collection.CollectionHeartbeat',
-    index=0,
-    containing_service=None,
-    input_type=_EMPTY,
-    output_type=_COLLECTIONHEARTBEATOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002\037\022\035/apis/v2/collection/heartbeat')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='PostDBRelay',
-    full_name='proto.Collection.PostDBRelay',
-    index=1,
-    containing_service=None,
-    input_type=_POSTDBRELAYINPUT,
-    output_type=_COLLECTIONMESSAGEOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002!\"\034/apis/v2/collection/relation:\001*')),
-  ),
-  _descriptor.MethodDescriptor(
-    name='PostMetrics',
-    full_name='proto.Collection.PostMetrics',
-    index=2,
-    containing_service=None,
-    input_type=_POSTMETRICSINPUT,
-    output_type=_COLLECTIONMESSAGEOUTPUT,
-    options=_descriptor._ParseOptions(descriptor_pb2.MethodOptions(), _b('\202\323\344\223\002 \"\033/apis/v2/collection/metrics:\001*')),
-  ),
-])
-_sym_db.RegisterServiceDescriptor(_COLLECTION)
-
-DESCRIPTOR.services_by_name['Collection'] = _COLLECTION
-
-# @@protoc_insertion_point(module_scope)
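
For context on what the descriptors deleted above were for: the module built request messages from these generated classes and invoked the proto.Diskprophet service through the stub defined in client_pb2_grpc.py (deleted next). A minimal sketch of that call path, assuming a reachable DiskProphet endpoint — the address below is a hypothetical placeholder, not taken from this commit:

import grpc

import client_pb2
import client_pb2_grpc

# Open a TLS channel to the (hypothetical) prediction service and create the
# generated Diskprophet stub from client_pb2_grpc.py.
channel = grpc.secure_channel('example.diskprophet.endpoint:8443',
                              grpc.ssl_channel_credentials())
stub = client_pb2_grpc.DiskprophetStub(channel)

# DPGetDisksPredictionInput is defined by the descriptor above: its
# physicalDiskIds/status/from/to fields are strings, limit/page are int64.
request = client_pb2.DPGetDisksPredictionInput(physicalDiskIds='disk-0001',
                                               limit=10, page=1)

# DPGetDisksPrediction returns a DPBinaryOutput whose single 'data' field
# carries the serialized prediction payload as bytes.
response = stub.DPGetDisksPrediction(request)
print(len(response.data))
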
diff --git a/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py b/src/pybind/mgr/diskprediction_cloud/common/client_pb2_grpc.py
deleted file mode 100644 (file)
index c1c3217..0000000
+++ /dev/null
@@ -1,395 +0,0 @@
-# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
-import grpc
-
-import client_pb2 as mainServer__pb2
-
-
-class GeneralStub(object):
-  """-------------------------- General -------------------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.GeneralHeartbeat = channel.unary_unary(
-        '/proto.General/GeneralHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralHeartbeatOutput.FromString,
-        )
-    self.Ping = channel.unary_unary(
-        '/proto.General/Ping',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.PingOutout.FromString,
-        )
-    self.Test = channel.unary_unary(
-        '/proto.General/Test',
-        request_serializer=mainServer__pb2.TestInput.SerializeToString,
-        response_deserializer=mainServer__pb2.TestOutput.FromString,
-        )
-
-
-class GeneralServicer(object):
-  """-------------------------- General -------------------------------------
-  """
-
-  def GeneralHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Ping(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Test(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_GeneralServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'GeneralHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.GeneralHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.GeneralHeartbeatOutput.SerializeToString,
-      ),
-      'Ping': grpc.unary_unary_rpc_method_handler(
-          servicer.Ping,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.PingOutout.SerializeToString,
-      ),
-      'Test': grpc.unary_unary_rpc_method_handler(
-          servicer.Test,
-          request_deserializer=mainServer__pb2.TestInput.FromString,
-          response_serializer=mainServer__pb2.TestOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.General', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class AccountStub(object):
-  """-------------------------- SERVER ACCOUNT ------------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.AccountHeartbeat = channel.unary_unary(
-        '/proto.Account/AccountHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.AccountHeartbeatOutput.FromString,
-        )
-    self.Login = channel.unary_unary(
-        '/proto.Account/Login',
-        request_serializer=mainServer__pb2.LoginInput.SerializeToString,
-        response_deserializer=mainServer__pb2.UserOutput.FromString,
-        )
-    self.Signup = channel.unary_unary(
-        '/proto.Account/Signup',
-        request_serializer=mainServer__pb2.SingupInput.SerializeToString,
-        response_deserializer=mainServer__pb2.SingupOutput.FromString,
-        )
-    self.ResendConfirmCode = channel.unary_unary(
-        '/proto.Account/ResendConfirmCode',
-        request_serializer=mainServer__pb2.ResendConfirmCodeInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.Confirm = channel.unary_unary(
-        '/proto.Account/Confirm',
-        request_serializer=mainServer__pb2.ConfirmInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.GetUsersByStatus = channel.unary_unary(
-        '/proto.Account/GetUsersByStatus',
-        request_serializer=mainServer__pb2.GetUsersByStatusInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GetUsersByStatusOutput.FromString,
-        )
-    self.DeleteUser = channel.unary_unary(
-        '/proto.Account/DeleteUser',
-        request_serializer=mainServer__pb2.DeleteUserInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-    self.UpdateUserStatus = channel.unary_unary(
-        '/proto.Account/UpdateUserStatus',
-        request_serializer=mainServer__pb2.UpdateUserStatusInput.SerializeToString,
-        response_deserializer=mainServer__pb2.GeneralMsgOutput.FromString,
-        )
-
-
-class AccountServicer(object):
-  """-------------------------- SERVER ACCOUNT ------------------------------
-  """
-
-  def AccountHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Login(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Signup(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def ResendConfirmCode(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def Confirm(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def GetUsersByStatus(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DeleteUser(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def UpdateUserStatus(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_AccountServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'AccountHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.AccountHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.AccountHeartbeatOutput.SerializeToString,
-      ),
-      'Login': grpc.unary_unary_rpc_method_handler(
-          servicer.Login,
-          request_deserializer=mainServer__pb2.LoginInput.FromString,
-          response_serializer=mainServer__pb2.UserOutput.SerializeToString,
-      ),
-      'Signup': grpc.unary_unary_rpc_method_handler(
-          servicer.Signup,
-          request_deserializer=mainServer__pb2.SingupInput.FromString,
-          response_serializer=mainServer__pb2.SingupOutput.SerializeToString,
-      ),
-      'ResendConfirmCode': grpc.unary_unary_rpc_method_handler(
-          servicer.ResendConfirmCode,
-          request_deserializer=mainServer__pb2.ResendConfirmCodeInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'Confirm': grpc.unary_unary_rpc_method_handler(
-          servicer.Confirm,
-          request_deserializer=mainServer__pb2.ConfirmInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'GetUsersByStatus': grpc.unary_unary_rpc_method_handler(
-          servicer.GetUsersByStatus,
-          request_deserializer=mainServer__pb2.GetUsersByStatusInput.FromString,
-          response_serializer=mainServer__pb2.GetUsersByStatusOutput.SerializeToString,
-      ),
-      'DeleteUser': grpc.unary_unary_rpc_method_handler(
-          servicer.DeleteUser,
-          request_deserializer=mainServer__pb2.DeleteUserInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-      'UpdateUserStatus': grpc.unary_unary_rpc_method_handler(
-          servicer.UpdateUserStatus,
-          request_deserializer=mainServer__pb2.UpdateUserStatusInput.FromString,
-          response_serializer=mainServer__pb2.GeneralMsgOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Account', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class DiskprophetStub(object):
-  """------------------------ SERVER DISKPROPHET ---------------------------
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.DPHeartbeat = channel.unary_unary(
-        '/proto.Diskprophet/DPHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.DPHeartbeatOutput.FromString,
-        )
-    self.DPGetPhysicalDisks = channel.unary_unary(
-        '/proto.Diskprophet/DPGetPhysicalDisks',
-        request_serializer=mainServer__pb2.DPGetPhysicalDisksInput.SerializeToString,
-        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
-        )
-    self.DPGetDisksPrediction = channel.unary_unary(
-        '/proto.Diskprophet/DPGetDisksPrediction',
-        request_serializer=mainServer__pb2.DPGetDisksPredictionInput.SerializeToString,
-        response_deserializer=mainServer__pb2.DPBinaryOutput.FromString,
-        )
-
-
-class DiskprophetServicer(object):
-  """------------------------ SERVER DISKPROPHET ---------------------------
-  """
-
-  def DPHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DPGetPhysicalDisks(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def DPGetDisksPrediction(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_DiskprophetServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'DPHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.DPHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.DPHeartbeatOutput.SerializeToString,
-      ),
-      'DPGetPhysicalDisks': grpc.unary_unary_rpc_method_handler(
-          servicer.DPGetPhysicalDisks,
-          request_deserializer=mainServer__pb2.DPGetPhysicalDisksInput.FromString,
-          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
-      ),
-      'DPGetDisksPrediction': grpc.unary_unary_rpc_method_handler(
-          servicer.DPGetDisksPrediction,
-          request_deserializer=mainServer__pb2.DPGetDisksPredictionInput.FromString,
-          response_serializer=mainServer__pb2.DPBinaryOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Diskprophet', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
-
-
-class CollectionStub(object):
-  """------------------------ SERVER Collection ---------------------------
-
-  """
-
-  def __init__(self, channel):
-    """Constructor.
-
-    Args:
-      channel: A grpc.Channel.
-    """
-    self.CollectionHeartbeat = channel.unary_unary(
-        '/proto.Collection/CollectionHeartbeat',
-        request_serializer=mainServer__pb2.Empty.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionHeartbeatOutput.FromString,
-        )
-    self.PostDBRelay = channel.unary_unary(
-        '/proto.Collection/PostDBRelay',
-        request_serializer=mainServer__pb2.PostDBRelayInput.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
-        )
-    self.PostMetrics = channel.unary_unary(
-        '/proto.Collection/PostMetrics',
-        request_serializer=mainServer__pb2.PostMetricsInput.SerializeToString,
-        response_deserializer=mainServer__pb2.CollectionMessageOutput.FromString,
-        )
-
-
-class CollectionServicer(object):
-  """------------------------ SERVER Collection ---------------------------
-
-  """
-
-  def CollectionHeartbeat(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def PostDBRelay(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-  def PostMetrics(self, request, context):
-    # missing associated documentation comment in .proto file
-    pass
-    context.set_code(grpc.StatusCode.UNIMPLEMENTED)
-    context.set_details('Method not implemented!')
-    raise NotImplementedError('Method not implemented!')
-
-
-def add_CollectionServicer_to_server(servicer, server):
-  rpc_method_handlers = {
-      'CollectionHeartbeat': grpc.unary_unary_rpc_method_handler(
-          servicer.CollectionHeartbeat,
-          request_deserializer=mainServer__pb2.Empty.FromString,
-          response_serializer=mainServer__pb2.CollectionHeartbeatOutput.SerializeToString,
-      ),
-      'PostDBRelay': grpc.unary_unary_rpc_method_handler(
-          servicer.PostDBRelay,
-          request_deserializer=mainServer__pb2.PostDBRelayInput.FromString,
-          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
-      ),
-      'PostMetrics': grpc.unary_unary_rpc_method_handler(
-          servicer.PostMetrics,
-          request_deserializer=mainServer__pb2.PostMetricsInput.FromString,
-          response_serializer=mainServer__pb2.CollectionMessageOutput.SerializeToString,
-      ),
-  }
-  generic_handler = grpc.method_handlers_generic_handler(
-      'proto.Collection', rpc_method_handlers)
-  server.add_generic_rpc_handlers((generic_handler,))
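For reference, these generated stubs were the only transport the cloud module had. A minimal
client-side sketch, assuming the package were still importable and the endpoint still
reachable (it no longer is, which is why this code is being removed):

    import grpc
    from diskprediction_cloud.common import client_pb2, client_pb2_grpc

    # A plaintext channel purely for illustration; grpcclient.py (removed below)
    # used grpc.secure_channel() with a pinned root certificate instead.
    channel = grpc.insecure_channel('api.diskprophet.com:31400')
    stub = client_pb2_grpc.AccountStub(channel)
    reply = stub.AccountHeartbeat(client_pb2.Empty())  # unary-unary RPC
    print(reply.message)  # the live service answered with an "... is alive" message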
diff --git a/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py b/src/pybind/mgr/diskprediction_cloud/common/clusterdata.py
deleted file mode 100644 (file)
index 45add69..0000000
+++ /dev/null
@@ -1,464 +0,0 @@
-"""\r
-Ceph database API\r
-\r
-"""\r
-from __future__ import absolute_import\r
-\r
-import json\r
-import rbd\r
-from mgr_module import CommandResult\r
-\r
-GB = 1024 * 1024 * 1024\r
-\r
-\r
-RBD_FEATURES_NAME_MAPPING = {\r
-    rbd.RBD_FEATURE_LAYERING: 'layering',\r
-    rbd.RBD_FEATURE_STRIPINGV2: 'striping',\r
-    rbd.RBD_FEATURE_EXCLUSIVE_LOCK: 'exclusive-lock',\r
-    rbd.RBD_FEATURE_OBJECT_MAP: 'object-map',\r
-    rbd.RBD_FEATURE_FAST_DIFF: 'fast-diff',\r
-    rbd.RBD_FEATURE_DEEP_FLATTEN: 'deep-flatten',\r
-    rbd.RBD_FEATURE_JOURNALING: 'journaling',\r
-    rbd.RBD_FEATURE_DATA_POOL: 'data-pool',\r
-    rbd.RBD_FEATURE_OPERATIONS: 'operations',\r
-}\r
-\r
-\r
-def differentiate(data1, data2):\r
-    """\r
-    # >>> times = [0, 2]\r
-    # >>> values = [100, 101]\r
-    # >>> differentiate(*zip(times, values))\r
-    0.5\r
-    """\r
-    return (data2[1] - data1[1]) / float(data2[0] - data1[0])\r
-\r
-\r
-class ClusterAPI(object):\r
-\r
-    def __init__(self, module_obj):\r
-        self.module = module_obj\r
-\r
-    @staticmethod\r
-    def format_bitmask(features):\r
-        """\r
-        Formats the bitmask:\r
-        # >>> format_bitmask(45)\r
-        ['deep-flatten', 'exclusive-lock', 'layering', 'object-map']\r
-        """\r
-        names = [val for key, val in RBD_FEATURES_NAME_MAPPING.items()\r
-                 if key & features == key]\r
-        return sorted(names)\r
-\r
-    def _open_connection(self, pool_name='device_health_metrics'):\r
-        pools = self.module.rados.list_pools()\r
-        is_pool = False\r
-        for pool in pools:\r
-            if pool == pool_name:\r
-                is_pool = True\r
-                break\r
-        if not is_pool:\r
-            self.module.log.debug('create %s pool' % pool_name)\r
-            # create pool\r
-            result = CommandResult('')\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'osd pool create',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'pg_num': 1,\r
-            }), '')\r
-            r, outb, outs = result.wait()\r
-            assert r == 0\r
-\r
-            # set pool application\r
-            result = CommandResult('')\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'osd pool application enable',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'app': 'mgr_devicehealth',\r
-            }), '')\r
-            r, outb, outs = result.wait()\r
-            assert r == 0\r
-\r
-        ioctx = self.module.rados.open_ioctx(pool_name)\r
-        return ioctx\r
-\r
-    @classmethod\r
-    def _rbd_disk_usage(cls, image, snaps, whole_object=True):\r
-        class DUCallback(object):\r
-            def __init__(self):\r
-                self.used_size = 0\r
-\r
-            def __call__(self, offset, length, exists):\r
-                if exists:\r
-                    self.used_size += length\r
-        snap_map = {}\r
-        prev_snap = None\r
-        total_used_size = 0\r
-        for _, size, name in snaps:\r
-            image.set_snap(name)\r
-            du_callb = DUCallback()\r
-            image.diff_iterate(0, size, prev_snap, du_callb,\r
-                               whole_object=whole_object)\r
-            snap_map[name] = du_callb.used_size\r
-            total_used_size += du_callb.used_size\r
-            prev_snap = name\r
-        return total_used_size, snap_map\r
-\r
-    def _rbd_image(self, ioctx, pool_name, image_name):\r
-        with rbd.Image(ioctx, image_name) as img:\r
-            stat = img.stat()\r
-            stat['name'] = image_name\r
-            stat['id'] = img.id()\r
-            stat['pool_name'] = pool_name\r
-            features = img.features()\r
-            stat['features'] = features\r
-            stat['features_name'] = self.format_bitmask(features)\r
-\r
-            # the following keys are deprecated\r
-            del stat['parent_pool']\r
-            del stat['parent_name']\r
-            stat['timestamp'] = '{}Z'.format(img.create_timestamp()\r
-                                             .isoformat())\r
-            stat['stripe_count'] = img.stripe_count()\r
-            stat['stripe_unit'] = img.stripe_unit()\r
-            stat['data_pool'] = None\r
-            try:\r
-                parent_info = img.parent_info()\r
-                stat['parent'] = {\r
-                    'pool_name': parent_info[0],\r
-                    'image_name': parent_info[1],\r
-                    'snap_name': parent_info[2]\r
-                }\r
-            except rbd.ImageNotFound:\r
-                # no parent image\r
-                stat['parent'] = None\r
-            # snapshots\r
-            stat['snapshots'] = []\r
-            for snap in img.list_snaps():\r
-                snap['timestamp'] = '{}Z'.format(\r
-                    img.get_snap_timestamp(snap['id']).isoformat())\r
-                snap['is_protected'] = img.is_protected_snap(snap['name'])\r
-                snap['used_bytes'] = None\r
-                snap['children'] = []\r
-                img.set_snap(snap['name'])\r
-                for child_pool_name, child_image_name in img.list_children():\r
-                    snap['children'].append({\r
-                        'pool_name': child_pool_name,\r
-                        'image_name': child_image_name\r
-                    })\r
-                stat['snapshots'].append(snap)\r
-            # disk usage\r
-            if 'fast-diff' in stat['features_name']:\r
-                snaps = [(s['id'], s['size'], s['name'])\r
-                         for s in stat['snapshots']]\r
-                snaps.sort(key=lambda s: s[0])\r
-                snaps += [(snaps[-1][0]+1 if snaps else 0, stat['size'], None)]\r
-                total_prov_bytes, snaps_prov_bytes = self._rbd_disk_usage(\r
-                    img, snaps, True)\r
-                stat['total_disk_usage'] = total_prov_bytes\r
-                for snap, prov_bytes in snaps_prov_bytes.items():\r
-                    if snap is None:\r
-                        stat['disk_usage'] = prov_bytes\r
-                        continue\r
-                    for ss in stat['snapshots']:\r
-                        if ss['name'] == snap:\r
-                            ss['disk_usage'] = prov_bytes\r
-                            break\r
-            else:\r
-                stat['total_disk_usage'] = None\r
-                stat['disk_usage'] = None\r
-            return stat\r
-\r
-    def get_rbd_list(self, pool_name=None):\r
-        if pool_name:\r
-            pools = [pool_name]\r
-        else:\r
-            pools = []\r
-            for data in self.get_osd_pools():\r
-                pools.append(data['pool_name'])\r
-        result = []\r
-        for pool in pools:\r
-            rbd_inst = rbd.RBD()\r
-            with self._open_connection(str(pool)) as ioctx:\r
-                names = rbd_inst.list(ioctx)\r
-                for name in names:\r
-                    try:\r
-                        stat = self._rbd_image(ioctx, pool, name)\r
-                    except rbd.ImageNotFound:\r
-                        continue\r
-                    result.append(stat)\r
-        return result\r
-\r
-    def get_object_pg_info(self, pool_name, object_name):\r
-        result = CommandResult('')\r
-        data_json = {}\r
-        self.module.send_command(\r
-            result, 'mon', '', json.dumps({\r
-                'prefix': 'osd map',\r
-                'format': 'json',\r
-                'pool': pool_name,\r
-                'object': object_name,\r
-            }), '')\r
-        ret, outb, outs = result.wait()\r
-        try:\r
-            if outb:\r
-                data_json = json.loads(outb)\r
-            else:\r
-                self.module.log.error('unable to get %s pg info' % pool_name)\r
-        except Exception as e:\r
-            self.module.log.error(\r
-                'unable to get %s pg, error: %s' % (pool_name, str(e)))\r
-        return data_json\r
-\r
-    @staticmethod\r
-    def _list_objects(ioctx, image_id):\r
-        objects = []\r
-        object_iterator = ioctx.list_objects()\r
-        while True:\r
-            try:\r
-                rados_object = next(object_iterator)\r
-                if image_id is None:\r
-                    objects.append(str(rados_object.key))\r
-                else:\r
-                    v = str(rados_object.key).split('.')\r
-                    if len(v) >= 2 and v[1] == image_id:\r
-                        objects.append(str(rados_object.key))\r
-            except StopIteration:\r
-                break\r
-        return objects\r
-\r
-    def get_rbd_info(self, pool_name, image_name):\r
-        with self._open_connection(pool_name) as ioctx:\r
-            try:\r
-                stat = self._rbd_image(ioctx, pool_name, image_name)\r
-                if stat.get('id'):\r
-                    objects = self._list_objects(ioctx, stat.get('id'))\r
-                    if objects:\r
-                        stat['objects'] = objects\r
-                        stat['pgs'] = list()\r
-                    for obj_name in objects:\r
-                        pgs_data = self.get_object_pg_info(pool_name, obj_name)\r
-                        stat['pgs'].extend([pgs_data])\r
-            except rbd.ImageNotFound:\r
-                stat = {}\r
-        return stat\r
-\r
-    def get_pool_objects(self, pool_name, image_id=None):\r
-        # list_objects\r
-        try:\r
-            with self._open_connection(pool_name) as ioctx:\r
-                objects = self._list_objects(ioctx, image_id)\r
-        except Exception:\r
-            objects = []\r
-        return objects\r
-\r
-    def get_ceph_df_state(self):\r
-        ceph_stats = self.module.get('df').get('stats', {})\r
-        if not ceph_stats:\r
-            return {'total_size': 0, 'avail_size': 0, 'raw_used_size': 0, 'used_percent': 0}\r
-        total_size = round(float(ceph_stats.get('total_bytes', 0)) / GB)\r
-        avail_size = round(float(ceph_stats.get('total_avail_bytes', 0)) / GB, 2)\r
-        raw_used_size = round(float(ceph_stats.get('total_used_bytes', 0)) / GB, 2)\r
-        if total_size != 0:\r
-            raw_used_percent = round(float(raw_used_size) / float(total_size) * 100, 2)\r
-        else:\r
-            raw_used_percent = 0\r
-        return {'total_size': total_size, 'avail_size': avail_size, 'raw_used_size': raw_used_size,\r
-                'used_percent': raw_used_percent}\r
-\r
-    def get_osd_metadata(self, osd_id=None):\r
-        if osd_id is not None:\r
-            return self.module.get('osd_metadata')[str(osd_id)]\r
-        return self.module.get('osd_metadata')\r
-\r
-    def get_mgr_metadata(self, mgr_id):\r
-        return self.module.get_metadata('mgr', mgr_id)\r
-\r
-    def get_osd_epoch(self):\r
-        return self.module.get('osd_map').get('epoch', 0)\r
-\r
-    def get_osds(self):\r
-        return self.module.get('osd_map').get('osds', [])\r
-\r
-    def get_max_osd(self):\r
-        return self.module.get('osd_map').get('max_osd', '')\r
-\r
-    def get_osd_pools(self):\r
-        return self.module.get('osd_map').get('pools', [])\r
-\r
-    def get_pool_bytes_used(self, pool_id):\r
-        bytes_used = None\r
-        pools = self.module.get('df').get('pools', [])\r
-        for pool in pools:\r
-            if pool_id == pool['id']:\r
-                bytes_used = pool['stats']['bytes_used']\r
-        return bytes_used\r
-\r
-    def get_cluster_id(self):\r
-        return self.module.get('mon_map').get('fsid')\r
-\r
-    def get_health_status(self):\r
-        health = json.loads(self.module.get('health')['json'])\r
-        return health.get('status')\r
-\r
-    def get_health_checks(self):\r
-        health = json.loads(self.module.get('health')['json'])\r
-        if health.get('checks'):\r
-            message = ''\r
-            checks = health['checks']\r
-            for key in checks.keys():\r
-                if message:\r
-                    message += ';'\r
-                if checks[key].get('summary', {}).get('message', ''):\r
-                    message += checks[key]['summary']['message']\r
-            return message\r
-        else:\r
-            return ''\r
-\r
-    def get_mons(self):\r
-        return self.module.get('mon_map').get('mons', [])\r
-\r
-    def get_mon_status(self):\r
-        mon_status = json.loads(self.module.get('mon_status')['json'])\r
-        return mon_status\r
-\r
-    def get_osd_smart(self, osd_id, device_id=None):\r
-        osd_devices = []\r
-        osd_smart = {}\r
-        devices = self.module.get('devices')\r
-        for dev in devices.get('devices', []):\r
-            osd = ''\r
-            daemons = dev.get('daemons', [])\r
-            for daemon in daemons:\r
-                if daemon[4:] != str(osd_id):\r
-                    continue\r
-                osd = daemon\r
-            if not osd:\r
-                continue\r
-            if dev.get('devid') and dev.get('devid') not in osd_devices:\r
-                osd_devices.append(dev.get('devid'))\r
-        for dev_id in osd_devices:\r
-            o_key = ''\r
-            if device_id and dev_id != device_id:\r
-                continue\r
-            smart_data = self.get_device_health(dev_id)\r
-            if smart_data:\r
-                o_key = sorted(smart_data.keys(), reverse=True)[0]\r
-            if o_key and smart_data and smart_data.values():\r
-                dev_smart = smart_data[o_key]\r
-                if dev_smart:\r
-                    osd_smart[dev_id] = dev_smart\r
-        return osd_smart\r
-\r
-    def get_device_health(self, devid):\r
-        health_data = {}\r
-        try:\r
-            r, outb, outs = self.module.remote('devicehealth', 'show_device_metrics', devid=devid, sample='')\r
-            if r != 0:\r
-                self.module.log.error('failed to get device %s health', devid)\r
-                health_data = {}\r
-            else:\r
-                health_data = json.loads(outb)\r
-        except Exception as e:\r
-            self.module.log.error('failed to get device %s health data due to %s', devid, str(e))\r
-        return health_data\r
-\r
-    def get_osd_hostname(self, osd_id):\r
-        result = ''\r
-        osd_metadata = self.get_osd_metadata(osd_id)\r
-        if osd_metadata:\r
-            osd_host = osd_metadata.get('hostname', 'None')\r
-            result = osd_host\r
-        return result\r
-\r
-    def get_osd_device_id(self, osd_id):\r
-        result = {}\r
-        if not str(osd_id).isdigit():\r
-            if str(osd_id)[0:4] == 'osd.':\r
-                osdid = osd_id[4:]\r
-            else:\r
-                raise Exception('not a valid <osd.NNN> id or number')\r
-        else:\r
-            osdid = osd_id\r
-        osd_metadata = self.get_osd_metadata(osdid)\r
-        if osd_metadata:\r
-            osd_device_ids = osd_metadata.get('device_ids', '')\r
-            if osd_device_ids:\r
-                result = {}\r
-                for osd_device_id in osd_device_ids.split(','):\r
-                    dev_name = ''\r
-                    if len(str(osd_device_id).split('=')) >= 2:\r
-                        dev_name = osd_device_id.split('=')[0]\r
-                        dev_id = osd_device_id.split('=')[1]\r
-                    else:\r
-                        dev_id = osd_device_id\r
-                    if dev_name:\r
-                        result[dev_name] = {'dev_id': dev_id}\r
-        return result\r
-\r
-    def get_file_systems(self):\r
-        return self.module.get('fs_map').get('filesystems', [])\r
-\r
-    def set_device_life_expectancy(self, device_id, from_date, to_date=None):\r
-        result = CommandResult('')\r
-\r
-        if to_date is None:\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'device set-life-expectancy',\r
-                'devid': device_id,\r
-                'from': from_date\r
-            }), '')\r
-        else:\r
-            self.module.send_command(result, 'mon', '', json.dumps({\r
-                'prefix': 'device set-life-expectancy',\r
-                'devid': device_id,\r
-                'from': from_date,\r
-                'to': to_date\r
-            }), '')\r
-        ret, outb, outs = result.wait()\r
-        if ret != 0:\r
-            self.module.log.error(\r
-                'failed to set device life expectancy, %s' % outs)\r
-        return ret\r
-\r
-    def reset_device_life_expectancy(self, device_id):\r
-        result = CommandResult('')\r
-        self.module.send_command(result, 'mon', '', json.dumps({\r
-            'prefix': 'device rm-life-expectancy',\r
-            'devid': device_id\r
-        }), '')\r
-        ret, outb, outs = result.wait()\r
-        if ret != 0:\r
-            self.module.log.error(\r
-                'failed to reset device life expectancy, %s' % outs)\r
-        return ret\r
-\r
-    def get_server(self, hostname):\r
-        return self.module.get_server(hostname)\r
-\r
-    def get_configuration(self, key):\r
-        return self.module.get_configuration(key)\r
-\r
-    def get_rate(self, svc_type, svc_name, path):\r
-        """returns most recent rate"""\r
-        data = self.module.get_counter(svc_type, svc_name, path)[path]\r
-\r
-        if data and len(data) > 1:\r
-            return differentiate(*data[-2:])\r
-        return 0.0\r
-\r
-    def get_latest(self, daemon_type, daemon_name, counter):\r
-        return self.module.get_latest(daemon_type, daemon_name, counter)\r
-\r
-    def get_pgs_up_by_poolid(self, poolid):\r
-        pgs = {}\r
-        try:\r
-            osd_map = self.module.get_osdmap()\r
-            if not osd_map:\r
-                return {}\r
-            pgs = osd_map.map_pool_pgs_up(int(poolid))\r
-            return pgs\r
-        except Exception:\r
-            return {}\r
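The get_rate() helper above reduces the two most recent perf-counter samples to a
per-second rate via differentiate(). A worked example with made-up samples:

    # two (timestamp, value) samples taken 5 seconds apart
    older, newer = (100, 2000), (105, 2600)
    rate = (newer[1] - older[1]) / float(newer[0] - older[0])
    assert rate == 120.0  # 600 operations over 5 seconds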
diff --git a/src/pybind/mgr/diskprediction_cloud/common/cypher.py b/src/pybind/mgr/diskprediction_cloud/common/cypher.py
deleted file mode 100644 (file)
index 7b7b60e..0000000
+++ /dev/null
@@ -1,71 +0,0 @@
-from __future__ import absolute_import
-
-import time
-
-
-class NodeInfo(object):
-    """ Neo4j Node information """
-    def __init__(self, label, domain_id, name, meta):
-        self.label = label
-        self.domain_id = domain_id
-        self.name = name
-        self.meta = meta
-
-
-class CypherOP(object):
-    """ Cypher Operation """
-
-    @staticmethod
-    def update(node, key, value, timestamp=None):
-        # the default timestamp is computed per call, not once at import time
-        if timestamp is None:
-            timestamp = int(time.time()*(1000**3))
-        result = ''
-        if isinstance(node, NodeInfo):
-            if key != 'time':
-                cy_value = '\'%s\'' % value
-            else:
-                cy_value = value
-            result = \
-                'set %s.%s=case when %s.time >= %s then %s.%s ELSE %s end' % (
-                    node.label, key, node.label, timestamp, node.label, key,
-                    cy_value)
-        return result
-
-    @staticmethod
-    def create_or_merge(node, timestamp=None):
-        if timestamp is None:
-            timestamp = int(time.time()*(1000**3))
-        result = ''
-        if isinstance(node, NodeInfo):
-            meta_list = []
-            if isinstance(node.meta, dict):
-                for key, value in node.meta.items():
-                    meta_list.append(CypherOP.update(node, key, value, timestamp))
-            domain_id = '{domainId:\'%s\'}' % node.domain_id
-            if meta_list:
-                result = 'merge (%s:%s %s) %s %s %s' % (
-                    node.label, node.label,
-                    domain_id,
-                    CypherOP.update(node, 'name', node.name, timestamp),
-                    ' '.join(meta_list),
-                    CypherOP.update(node, 'time', timestamp, timestamp))
-            else:
-                result = 'merge (%s:%s %s) %s %s' % (
-                    node.label, node.label,
-                    domain_id,
-                    CypherOP.update(node, 'name', node.name, timestamp),
-                    CypherOP.update(node, 'time', timestamp, timestamp))
-        return result
-
-    @staticmethod
-    def add_link(snode, dnode, relationship, timestamp=None):
-        result = ''
-        if timestamp is None:
-            timestamp = int(time.time()*(1000**3))
-        if isinstance(snode, NodeInfo) and isinstance(dnode, NodeInfo):
-            cy_snode = CypherOP.create_or_merge(snode, timestamp)
-            cy_dnode = CypherOP.create_or_merge(dnode, timestamp)
-            target = snode.label + dnode.label
-            link = 'merge (%s)-[%s:%s]->(%s) set %s.time=case when %s.time >= %s then %s.time ELSE %s end' % (
-                snode.label, target, relationship,
-                dnode.label, target,
-                target, timestamp,
-                target, timestamp)
-            result = '%s %s %s' % (cy_snode, cy_dnode, link)
-        return result
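These helpers emit plain Cypher text. For a hypothetical host node (identifiers invented
for illustration), create_or_merge() produces a single line, wrapped here for readability:

    node = NodeInfo(label='VMHost', domain_id='host-01', name='ceph-node-1', meta={})
    print(CypherOP.create_or_merge(node, timestamp=1000))
    # merge (VMHost:VMHost {domainId:'host-01'})
    #   set VMHost.name=case when VMHost.time >= 1000 then VMHost.name ELSE 'ceph-node-1' end
    #   set VMHost.time=case when VMHost.time >= 1000 then VMHost.time ELSE 1000 end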
diff --git a/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py b/src/pybind/mgr/diskprediction_cloud/common/grpcclient.py
deleted file mode 100644 (file)
index 5a1d5e7..0000000
+++ /dev/null
@@ -1,242 +0,0 @@
-# vim: tabstop=4 shiftwidth=4 softtabstop=4
-import grpc
-import json
-from logging import getLogger
-import time
-
-from . import DummyResonse
-from . import client_pb2
-from . import client_pb2_grpc
-
-
-def gen_configuration(**kwargs):
-    configuration = {
-        'host': kwargs.get('host', 'api.diskprophet.com'),
-        'user': kwargs.get('user'),
-        'password': kwargs.get('password'),
-        'port': kwargs.get('port', 31400),
-        'mgr_inst': kwargs.get('mgr_inst', None),
-        'cert_context': kwargs.get('cert_context'),
-        'ssl_target_name': kwargs.get('ssl_target_name', 'api.diskprophet.com'),
-        'default_authority': kwargs.get('default_authority', 'api.diskprophet.com')}
-    return configuration
-
-
-class GRPcClient(object):
-
-    def __init__(self, configuration):
-        self.auth = None
-        self.host = configuration.get('host')
-        self.port = configuration.get('port')
-        if configuration.get('user') and configuration.get('password'):
-            self.auth = (
-                ('account', configuration.get('user')),
-                ('password', configuration.get('password')))
-        self.cert_context = configuration.get('cert_context')
-        self.ssl_target_name = configuration.get('ssl_target_name')
-        self.default_authority = configuration.get('default_authority')
-        self.mgr_inst = configuration.get('mgr_inst')
-        if self.mgr_inst:
-            self._logger = self.mgr_inst.log
-        else:
-            self._logger = getLogger()
-        self._client = self._get_channel()
-
-    def close(self):
-        if self._client:
-            self._client.close()
-
-    @staticmethod
-    def connectivity_update(connectivity):
-        pass
-
-    def _get_channel(self):
-        try:
-            creds = grpc.ssl_channel_credentials(
-                root_certificates=self.cert_context)
-            channel = \
-                grpc.secure_channel('{}:{}'.format(
-                    self.host, self.port), creds,
-                    options=(('grpc.ssl_target_name_override', self.ssl_target_name,),
-                             ('grpc.default_authority', self.default_authority),))
-            channel.subscribe(self.connectivity_update, try_to_connect=True)
-            return channel
-        except Exception as e:
-            self._logger.error(
-                'failed to create connection exception: {}'.format(
-                    ';'.join(str(e).split('\n\t'))))
-            return None
-
-    def test_connection(self):
-        try:
-            stub_account = client_pb2_grpc.AccountStub(self._client)
-            result = stub_account.AccountHeartbeat(client_pb2.Empty())
-            self._logger.debug('test connection result: {}'.format(str(result)))
-            return bool(result and 'is alive' in str(result.message))
-        except Exception as e:
-            self._logger.error(
-                'failed to test connection: {}'.format(
-                    ';'.join(str(e).split('\n\t'))))
-            return False
-
-    def _send_metrics(self, data, measurement):
-        status_info = dict()
-        status_info['measurement'] = None
-        status_info['success_count'] = 0
-        status_info['failure_count'] = 0
-        for dp_data in data:
-            d_measurement = dp_data.measurement
-            if not d_measurement:
-                status_info['measurement'] = measurement
-            else:
-                status_info['measurement'] = d_measurement
-            tag_list = []
-            field_list = []
-            for name in dp_data.tags:
-                tag = '{}={}'.format(name, dp_data.tags[name])
-                tag_list.append(tag)
-            for name in dp_data.fields:
-                if dp_data.fields[name] is None:
-                    continue
-                if isinstance(dp_data.fields[name], str):
-                    field = '{}=\"{}\"'.format(name, dp_data.fields[name])
-                elif isinstance(dp_data.fields[name], bool):
-                    field = '{}={}'.format(name,
-                                           str(dp_data.fields[name]).lower())
-                elif isinstance(dp_data.fields[name], int):
-                    # int covers all integer widths on both Python 2 and 3
-                    field = '{}={}i'.format(name, dp_data.fields[name])
-                else:
-                    field = '{}={}'.format(name, dp_data.fields[name])
-                field_list.append(field)
-            # InfluxDB-style line protocol: measurement,tags fields timestamp;
-            # named 'point' so the loop's input list is not shadowed
-            point = '{},{} {} {}'.format(
-                status_info['measurement'],
-                ','.join(tag_list),
-                ','.join(field_list),
-                int(time.time() * 1000 * 1000 * 1000))
-            try:
-                resp = self._send_info(data=[point], measurement=status_info['measurement'])
-                status_code = resp.status_code
-                if 200 <= status_code < 300:
-                    self._logger.debug(
-                        '{} send diskprediction api success(ret: {})'.format(
-                            status_info['measurement'], status_code))
-                    status_info['success_count'] += 1
-                else:
-                    self._logger.error(
-                        'return code: {}, content: {}'.format(
-                            status_code, resp.content))
-                    status_info['failure_count'] += 1
-            except Exception as e:
-                status_info['failure_count'] += 1
-                self._logger.error(str(e))
-        return status_info
-
-    def _send_db_relay(self, data, measurement):
-        status_info = dict()
-        status_info['measurement'] = measurement
-        status_info['success_count'] = 0
-        status_info['failure_count'] = 0
-        for dp_data in data:
-            try:
-                resp = self._send_info(
-                    data=[dp_data.fields['cmd']], measurement=measurement)
-                status_code = resp.status_code
-                if 200 <= status_code < 300:
-                    self._logger.debug(
-                        '{} send diskprediction api success(ret: {})'.format(
-                            measurement, status_code))
-                    status_info['success_count'] += 1
-                else:
-                    self._logger.error(
-                        'return code: {}, content: {}'.format(
-                            status_code, resp.content))
-                    status_info['failure_count'] += 1
-            except Exception as e:
-                status_info['failure_count'] += 1
-                self._logger.error(str(e))
-        return status_info
-
-    def send_info(self, data, measurement):
-        """
-        :param data: data structure
-        :param measurement: data measurement class name
-        :return:
-            status_info = {
-                'success_count': <count>,
-                'failure_count': <count>
-            }
-        """
-        if measurement == 'db_relay':
-            return self._send_db_relay(data, measurement)
-        else:
-            return self._send_metrics(data, measurement)
-
-    def _send_info(self, data, measurement):
-        resp = DummyResonse()
-        try:
-            stub_collection = client_pb2_grpc.CollectionStub(self._client)
-            if measurement == 'db_relay':
-                result = stub_collection.PostDBRelay(
-                    client_pb2.PostDBRelayInput(cmds=data), metadata=self.auth)
-            else:
-                result = stub_collection.PostMetrics(
-                    client_pb2.PostMetricsInput(points=data), metadata=self.auth)
-            if result and 'success' in str(result.message).lower():
-                resp.status_code = 200
-                resp.content = ''
-            else:
-                resp.status_code = 400
-                resp.content = ';'.join(str(result).split('\n\t'))
-                self._logger.error(
-                    'failed to send info: {}'.format(resp.content))
-        except Exception as e:
-            resp.status_code = 400
-            resp.content = ';'.join(str(e).split('\n\t'))
-            self._logger.error(
-                'failed to send info exception: {}'.format(resp.content))
-        return resp
-
-    def query_info(self, host_domain_id, disk_domain_id, measurement):
-        resp = DummyResonse()
-        try:
-            stub_dp = client_pb2_grpc.DiskprophetStub(self._client)
-            predicted = stub_dp.DPGetDisksPrediction(
-                client_pb2.DPGetDisksPredictionInput(
-                    physicalDiskIds=disk_domain_id),
-                metadata=self.auth)
-            if predicted and hasattr(predicted, 'data'):
-                resp.status_code = 200
-                resp.content = ''
-                resp_json = json.loads(predicted.data)
-                rc = resp_json.get('results', [])
-                if rc:
-                    series = rc[0].get('series', [])
-                    if series:
-                        values = series[0].get('values', [])
-                        if not values:
-                            resp.resp_json = {}
-                        else:
-                            columns = series[0].get('columns', [])
-                            for item in values:
-                                # get prediction key and value from server.
-                                for name, value in zip(columns, item):
-                                    # process prediction data
-                                    resp.resp_json[name] = value
-                self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
-                return resp
-            else:
-                resp.status_code = 400
-                resp.content = ''
-                resp.resp_json = {'error': ';'.join(str(predicted).split('\n\t'))}
-                self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
-                return resp
-        except Exception as e:
-            resp.status_code = 400
-            resp.content = ';'.join(str(e).split('\n\t'))
-            resp.resp_json = {'error': resp.content}
-            self._logger.debug("query {}:{} result:{}".format(host_domain_id, disk_domain_id, resp))
-            return resp
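Putting the pieces together, the agents drove this client roughly as follows. A sketch only:
the account, password, and 'points' payload are placeholders, and the service behind it has
been shut down:

    conf = gen_configuration(
        host='api.diskprophet.com', port=31400,
        user='<account>', password='<password>',
        cert_context=open('server.crt', 'rb').read())
    client = GRPcClient(conf)
    if client.test_connection():
        # 'points' stands for the metric objects prepared by the agent tasks
        status = client.send_info(data=points, measurement='sai_disk')
        print(status['success_count'], status['failure_count'])
    client.close()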
diff --git a/src/pybind/mgr/diskprediction_cloud/common/server.crt b/src/pybind/mgr/diskprediction_cloud/common/server.crt
deleted file mode 100644 (file)
index d72c9d2..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
------BEGIN CERTIFICATE-----
-MIICsjCCAZoCCQCKLjrHOzCTrDANBgkqhkiG9w0BAQsFADAbMRkwFwYDVQQDDBBh
-cGkuZmVkZXJhdG9yLmFpMB4XDTE4MDgwMjA2NDg0N1oXDTI4MDczMDA2NDg0N1ow
-GzEZMBcGA1UEAwwQYXBpLmZlZGVyYXRvci5haTCCASIwDQYJKoZIhvcNAQEBBQAD
-ggEPADCCAQoCggEBAJkDL/VoLbI+Rc1GXkZwpN8n4e7HhIk1iK98yhXegoH8g6ZZ
-uVVlUW/zNO0V5W9IgiSBqEWOEf9heWj7mIbbxl437W1LpR4V0LKR2dbY7ZMwlB3L
-ZJYxtziZYu1g4Fn9hDnVJIXVmXFpF62wHd2ZSY7FyUF/OGetxLSfoOMkTHY8A8HB
-92vQfoFjgx1e23lLgTO2VpucmU/qXiF+xI/K6kkrMnGJi4xBL29i3aKRRNktVUHf
-Zs6JhBKl4sbvkW5m5AECW4c0XxVJotTLoPUjx4rxp0k5S1aQSYSS+0z96eVY0w8J
-ungiWEj7lLqwEGKjOzfjDLsczZIcZZcQSQwb3qcCAwEAATANBgkqhkiG9w0BAQsF
-AAOCAQEADwfBrHsvPmUD8CTx8lpVcqrOlHc7ftW3hb11vWwwfJw4fBiJ8DoB496x
-SAP2CJyDnSLdyvVueKLjiRFBm96W76nbMeP9+CkktGRUbLjkByv/v+7WSxRrukDC
-yR6IXqQJe4ADcYkVYoUMx3frBQzFtS7hni0FPvl3AN55TvTXqed61CdN9zdw9Ezn
-yn0oy3BbT5h/zNHefTQBzgQhW62C5YdTRtS6VVWV/k1kLz0GVG1eMtAqueUCxFeM
-g1mXYz2/Cm5C8pszZfiP+a/QV1z/3QgRUp0i0yVLiteqNDCPv6bc767VQEuXok9p
-NDuKElVxdA0WD9cbnBXiyfeMOQnjQw==
------END CERTIFICATE-----
diff --git a/src/pybind/mgr/diskprediction_cloud/module.py b/src/pybind/mgr/diskprediction_cloud/module.py
deleted file mode 100644 (file)
index 50b88e8..0000000
+++ /dev/null
@@ -1,454 +0,0 @@
-"""
-diskprediction with cloud predictor
-"""
-from __future__ import absolute_import
-
-import base64
-from datetime import datetime
-import errno
-import json
-from mgr_module import MgrModule
-import os
-from threading import Event
-
-try:
-    from string import maketrans
-except ImportError:
-    maketrans = str.maketrans
-
-from .common import DP_MGR_STAT_ENABLED, DP_MGR_STAT_DISABLED
-from .task import MetricsRunner, SmartRunner, PredictRunner, TestRunner
-
-TIME_DAYS = 24*60*60
-TIME_WEEK = TIME_DAYS * 7
-DP_AGENTS = [MetricsRunner, SmartRunner, PredictRunner]
-CUSTOMER_ALPHABET = "ABCDEFG&HIJKLMN@OQRS.TUV(WXYZabcd)efghijlmn-opqrstu*vwxyz0123=45"
-ORIGIN_ALPHABET = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
-
-
-def get_transtable():
-    transtable = maketrans(ORIGIN_ALPHABET, CUSTOMER_ALPHABET)
-    return transtable
-
-
-def get_reverse_transtable():
-    transtable = maketrans(CUSTOMER_ALPHABET, ORIGIN_ALPHABET)
-    return transtable
-
-
-def encode_string(value):
-    if len(value) == 0:
-        return ""
-    transtable = get_transtable()
-    e = str((base64.b64encode(str(value).encode())).decode("utf-8"))
-    e = e.rstrip("=")
-    return e.translate(transtable)
-
-
-class Module(MgrModule):
-
-    MODULE_OPTIONS = [
-        {
-            'name': 'diskprediction_server',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_port',
-            'default': '31400'
-        },
-        {
-            'name': 'diskprediction_user',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_password',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_upload_metrics_interval',
-            'default': '600'
-        },
-        {
-            'name': 'diskprediction_upload_smart_interval',
-            'default': '43200'
-        },
-        {
-            'name': 'diskprediction_retrieve_prediction_interval',
-            'default': '43200'
-        },
-        {
-            'name': 'diskprediction_cert_context',
-            'default': ''
-        },
-        {
-            'name': 'diskprediction_ssl_target_name_override',
-            'default': 'localhost'
-        },
-        {
-            'name': 'diskprediction_default_authority',
-            'default': 'localhost'
-        },
-        {
-            'name': 'sleep_interval',
-            'default': str(600),
-        }
-    ]
-
-    COMMANDS = [
-        {
-            'cmd': 'device show-prediction-config',
-            'desc': 'Prints diskprediction configuration',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device set-cloud-prediction-config '
-                   'name=server,type=CephString,req=true '
-                   'name=user,type=CephString,req=true '
-                   'name=password,type=CephString,req=true '
-                   'name=certfile,type=CephString,req=true '
-                   'name=port,type=CephString,req=false ',
-            'desc': 'Configure Disk Prediction service',
-            'perm': 'rw'
-        },
-        {
-            'cmd': 'device debug metrics-forced',
-            'desc': 'Run metrics agent forced',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'device debug smart-forced',
-            'desc': 'Run smart agent forced',
-            'perm': 'r'
-        },
-        {
-            'cmd': 'diskprediction_cloud status',
-            'desc': 'Check diskprediction_cloud status',
-            'perm': 'r'
-        }
-    ]
-
-    def __init__(self, *args, **kwargs):
-        super(Module, self).__init__(*args, **kwargs)
-        self.status = {'status': DP_MGR_STAT_DISABLED}
-        self._event = Event()
-        self._predict_event = Event()
-        self._agents = []
-        self._activated_cloud = False
-        self.prediction_result = {}
-        self.config = dict()
-        self._run = True
-
-    def config_notify(self):
-        for opt in self.MODULE_OPTIONS:
-            setattr(self,
-                    opt['name'],
-                    self.get_module_option(opt['name']))
-            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
-        if not self._activated_cloud and self.get_ceph_option('device_failure_prediction_mode') == 'cloud':
-            self._event.set()
-        if self._activated_cloud and self.get_ceph_option('device_failure_prediction_mode') != 'cloud':
-            self._event.set()
-
-    @property
-    def config_keys(self):
-        return dict((o['name'], o.get('default', None)) for o in self.MODULE_OPTIONS)
-
-    def set_config_option(self, option, value):
-        if option not in self.config_keys.keys():
-            raise RuntimeError('{0} is an unknown configuration '
-                               'option'.format(option))
-
-        if option in ['diskprediction_port',
-                      'diskprediction_upload_metrics_interval',
-                      'diskprediction_upload_smart_interval',
-                      'diskprediction_retrieve_prediction_interval']:
-            if not str(value).isdigit():
-                raise RuntimeError('invalid {} configured. Please specify '
-                                   'a valid integer (got {})'.format(option, value))
-
-        self.log.debug('Setting in-memory config option %s to: %s', option,
-                       value)
-        self.set_module_option(option, value)
-        self.config[option] = value
-
-        return True
-
-    def get_configuration(self, key):
-        return self.get_module_option(key, self.config_keys[key])
-
-    @staticmethod
-    def _convert_timestamp(predicted_timestamp, life_expectancy_day):
-        """
-        :param predicted_timestamp: unit is nanoseconds
-        :param life_expectancy_day: unit is seconds
-        :return:
-            date format '%Y-%m-%d' ex. 2018-01-01
-        """
-        return datetime.fromtimestamp(
-            predicted_timestamp / (1000 ** 3) + life_expectancy_day).strftime('%Y-%m-%d')
-
-    def _show_prediction_config(self, cmd):
-        self.show_module_config()
-        return 0, json.dumps(self.config, indent=4, sort_keys=True), ''
-
-    def _set_ssl_target_name(self, cmd):
-        str_ssl_target = cmd.get('ssl_target_name', '')
-        try:
-            self.set_module_option('diskprediction_ssl_target_name_override', str_ssl_target)
-            return 0, 'successfully configured ssl target name', ''
-        except Exception as e:
-            return -errno.EINVAL, '', str(e)
-
-    def _set_ssl_default_authority(self, cmd):
-        str_ssl_authority = cmd.get('ssl_authority', '')
-        try:
-            self.set_module_option('diskprediction_default_authority', str_ssl_authority)
-            return 0, 'successfully configured ssl default authority', ''
-        except Exception as e:
-            return -errno.EINVAL, '', str(e)
-
-    def _set_cloud_prediction_config(self, cmd):
-        str_cert_path = cmd.get('certfile', '')
-        if os.path.exists(str_cert_path):
-            with open(str_cert_path, 'rb') as f:
-                trusted_certs = f.read()
-            self.set_config_option(
-                'diskprediction_cert_context', trusted_certs)
-            for _agent in self._agents:
-                _agent.event.set()
-            self.set_module_option('diskprediction_server', cmd['server'])
-            self.set_module_option('diskprediction_user', cmd['user'])
-            self.set_module_option('diskprediction_password', encode_string(cmd['password']))
-            if cmd.get('port'):
-                self.set_module_option('diskprediction_port', cmd['port'])
-            return 0, 'successfully configured cloud mode connection', ''
-        else:
-            return -errno.EINVAL, '', 'certificate file does not exist'
-
-    def _debug_metrics_forced(self, cmd):
-        msg = ''
-        for _agent in self._agents:
-            if isinstance(_agent, MetricsRunner):
-                msg = 'run metrics agent successfully'
-                _agent.event.set()
-        return 0, msg, ''
-
-    def _debug_smart_forced(self, cmd):
-        msg = ''
-        for _agent in self._agents:
-            if isinstance(_agent, SmartRunner):
-                msg = 'run smart agent successfully'
-                _agent.event.set()
-        return 0, msg, ''
-
-    def refresh_config(self):
-        for opt in self.MODULE_OPTIONS:
-            setattr(self,
-                    opt['name'],
-                    self.get_module_option(opt['name']))
-            self.log.debug(' %s = %s', opt['name'], getattr(self, opt['name']))
-
-    def _status(self, cmd):
-        return 0, json.dumps(self.status), ''
-
-    def _refresh_cloud_prediction_result(self):
-        for _agent in self._agents:
-            if isinstance(_agent, PredictRunner):
-                self._predict_event.clear()
-                _agent.event.set()
-                self._predict_event.wait(300)
-                if self._predict_event.is_set():
-                    self._predict_event.clear()
-                break
-
-    def predict_life_expectancy(self, devid):
-        assert devid
-        result = self.get('device {}'.format(devid))
-        if not result:
-            return -1, '', 'device {} not found'.format(devid)
-        dev_info = result.get('device', {})
-        if not dev_info:
-            return -1, '', 'device {} not found'.format(devid)
-        self._refresh_cloud_prediction_result()
-        prediction_data = self.prediction_result.get(devid)
-        if not prediction_data:
-            return -1, '', 'device {} prediction data not ready'.format(devid)
-        elif prediction_data.get('near_failure', '').lower() == 'good':
-            return 0, '>6w', ''
-        elif prediction_data.get('near_failure', '').lower() == 'warning':
-            return 0, '>=2w and <=6w', ''
-        elif prediction_data.get('near_failure', '').lower() == 'bad':
-            return 0, '<2w', ''
-        else:
-            return 0, 'unknown', ''
-
-    def _update_device_life_expectancy_day(self, devid, prediction):
-        # Update osd life-expectancy
-        from .common.clusterdata import ClusterAPI
-        predicted = None
-        life_expectancy_day_min = None
-        life_expectancy_day_max = None
-        if prediction.get('predicted'):
-            predicted = int(prediction['predicted'])
-        if prediction.get('near_failure'):
-            if prediction['near_failure'].lower() == 'good':
-                life_expectancy_day_min = (TIME_WEEK * 6) + TIME_DAYS
-                life_expectancy_day_max = None
-            elif prediction['near_failure'].lower() == 'warning':
-                life_expectancy_day_min = (TIME_WEEK * 2)
-                life_expectancy_day_max = (TIME_WEEK * 6)
-            elif prediction['near_failure'].lower() == 'bad':
-                life_expectancy_day_min = 0
-                life_expectancy_day_max = (TIME_WEEK * 2) - TIME_DAYS
-            else:
-                # Near failure state is unknown.
-                predicted = None
-                life_expectancy_day_min = None
-                life_expectancy_day_max = None
-
-        obj_api = ClusterAPI(self)
-        if predicted and devid and life_expectancy_day_min is not None:
-            from_date = None
-            to_date = None
-            try:
-                if life_expectancy_day_min is not None:
-                    from_date = self._convert_timestamp(predicted, life_expectancy_day_min)
-
-                if life_expectancy_day_max is not None:
-                    to_date = self._convert_timestamp(predicted, life_expectancy_day_max)
-
-                obj_api.set_device_life_expectancy(devid, from_date, to_date)
-                self.log.info(
-                    'succeed to set device {} life expectancy from: {}, to: {}'.format(
-                        devid, from_date, to_date))
-            except Exception as e:
-                self.log.error(
-                    'failed to set device {} life expectancy from: {}, to: {}, {}'.format(
-                        devid, from_date, to_date, str(e)))
-        else:
-            obj_api.reset_device_life_expectancy(devid)
-
-    def predict_all_devices(self):
-        if not self._activated_cloud:
-            return -1, '', 'diskprediction_cloud not ready'
-        self.refresh_config()
-        result = self.get('devices')
-        if not result:
-            return -1, '', 'unable to get all devices for prediction'
-        self._refresh_cloud_prediction_result()
-        for dev in result.get('devices', []):
-            devid = dev.get('devid')
-            if not devid:
-                continue
-            prediction_data = self.prediction_result.get(devid)
-            if not prediction_data:
-                return -1, '', 'device {} prediction data not ready'.format(devid)
-            self._update_device_life_expectancy_day(devid, prediction_data)
-        return 0, '', ''
-
-    def handle_command(self, _, cmd):
-        for o_cmd in self.COMMANDS:
-            if cmd['prefix'] == o_cmd['cmd'][:len(cmd['prefix'])]:
-                fun_name = ''
-                args = o_cmd['cmd'].split(' ')
-                for arg in args:
-                    if arg.lower() == 'diskprediction_cloud':
-                        continue
-                    if arg.lower() == 'device':
-                        continue
-                    if '=' in arg or ',' in arg or not arg:
-                        continue
-                    fun_name += '_%s' % arg.replace('-', '_')
-                if fun_name:
-                    fun = getattr(self, fun_name, None)
-                    if fun:
-                        return fun(cmd)
-        return -errno.EINVAL, '', 'cmd not found'
-
-    def show_module_config(self):
-        for key, default in self.config_keys.items():
-            self.set_config_option(key, self.get_module_option(key, default))
-
-    def serve(self):
-        self.log.info('Starting diskprediction module')
-        self.config_notify()
-        self.status = {'status': DP_MGR_STAT_ENABLED}
-
-        while self._run:
-            self.refresh_config()
-            mode = self.get_ceph_option('device_failure_prediction_mode')
-            if mode == 'cloud':
-                if not self._activated_cloud:
-                    self.start_cloud_disk_prediction()
-            else:
-                if self._activated_cloud:
-                    self.stop_disk_prediction()
-
-            # Check agent hang is?
-            restart_agent = False
-            try:
-                for dp_agent in self._agents:
-                    if dp_agent.is_timeout():
-                        self.log.error('agent name: {] timeout'.format(dp_agent.task_name))
-                        restart_agent = True
-                        break
-            except Exception:
-                self.log.error('disk prediction plugin failed; trying to restart')
-                restart_agent = True
-
-            if restart_agent:
-                self.stop_disk_prediction()
-            else:
-                sleep_interval = int(self.sleep_interval) or 60
-                self._event.wait(sleep_interval)
-                self._event.clear()
-        if self._activated_cloud:
-            self.stop_disk_prediction()
-
-    def _agent_call_back(self):
-        self.log.debug('notify refresh devices prediction result')
-        self._predict_event.set()
-
-    def start_cloud_disk_prediction(self):
-        assert not self._activated_cloud
-        for dp_agent in DP_AGENTS:
-            if dp_agent == PredictRunner:
-                obj_agent = dp_agent(self, 300, self._agent_call_back)
-            else:
-                obj_agent = dp_agent(self, 300)
-            obj_agent.start()
-            self._agents.append(obj_agent)
-        self._activated_cloud = True
-        self.log.info('start cloud disk prediction')
-
-    def stop_disk_prediction(self):
-        assert self._activated_cloud
-        try:
-            self.status = {'status': DP_MGR_STAT_DISABLED}
-            while self._agents:
-                dp_agent = self._agents.pop()
-                self.log.info('stopping agent {}'.format(dp_agent.task_name))
-                dp_agent.terminate()
-                dp_agent.join(5)
-                del dp_agent
-            self._activated_cloud = False
-            self.log.info('stop disk prediction')
-        except Exception:
-            self.log.error('failed to stop disk prediction cloud plugin')
-
-    def shutdown(self):
-        self._run = False
-        self._event.set()
-        super(Module, self).shutdown()
-
-    def self_test(self):
-        obj_test = TestRunner(self)
-        obj_test.run()
-        self.log.info('self test completed')
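Note: the serve() method removed above follows a common ceph-mgr pattern: an interruptible sleep built from threading.Event.wait(timeout), which blocks for the poll interval but returns immediately once shutdown() sets the event. A minimal, self-contained sketch of that pattern (the names PollingModule and poll_once are hypothetical illustrations, not part of the ceph-mgr API):

    import threading

    class PollingModule:
        """Minimal sketch of the serve()/shutdown() loop pattern."""

        def __init__(self, interval=60):
            self._event = threading.Event()
            self._run = True
            self._interval = interval

        def poll_once(self):
            # hypothetical placeholder for one iteration of work
            print('polling')

        def serve(self):
            while self._run:
                self.poll_once()
                # Sleep for the interval, but wake immediately if
                # shutdown() (or any notifier) sets the event.
                self._event.wait(self._interval)
                self._event.clear()

        def shutdown(self):
            self._run = False
            self._event.set()  # interrupt the wait so serve() exits promptly

The point of the design is that shutdown latency is bounded by one poll iteration rather than by the full sleep interval.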
diff --git a/src/pybind/mgr/diskprediction_cloud/requirements.txt b/src/pybind/mgr/diskprediction_cloud/requirements.txt
deleted file mode 100644 (file)
index 82a6da8..0000000
+++ /dev/null
@@ -1,8 +0,0 @@
-google-api-python-client==1.7.3
-google-auth==1.5.0
-google-auth-httplib2==0.0.3
-googleapis-common-protos==1.5.3
-grpc-google-logging-v2>=0.8.1
-grpc-google-pubsub-v1>=0.8.1
-grpcio>=1.14.1
-
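Note: the pinned grpcio dependency above was what the removed common/grpcclient.py used to reach the ProphetStor endpoint. For reference, a secure channel carrying the ssl_target_name_override and default_authority options that gen_configuration() exposes (see task.py below) can be built roughly as follows. This is a sketch of the general grpcio API under stated assumptions, not the removed client; make_secure_channel and its parameters are hypothetical names:

    import grpc

    def make_secure_channel(host, port, ca_cert_pem,
                            ssl_target_name=None, default_authority=None):
        # ca_cert_pem: PEM-encoded root certificate bytes (e.g. server.crt)
        creds = grpc.ssl_channel_credentials(root_certificates=ca_cert_pem)
        options = []
        if ssl_target_name:
            # Override the hostname checked against the server certificate.
            options.append(('grpc.ssl_target_name_override', ssl_target_name))
        if default_authority:
            # Value sent in the HTTP/2 :authority pseudo-header.
            options.append(('grpc.default_authority', default_authority))
        return grpc.secure_channel('%s:%s' % (host, port), creds,
                                   options=options)

These two channel options matter when the service sits behind a load balancer whose DNS name does not match the certificate's subject.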
diff --git a/src/pybind/mgr/diskprediction_cloud/task.py b/src/pybind/mgr/diskprediction_cloud/task.py
deleted file mode 100644 (file)
index 6ed04e6..0000000
+++ /dev/null
@@ -1,181 +0,0 @@
-from __future__ import absolute_import
-
-import time
-from threading import Event, Thread
-
-from .agent.predictor import PredictAgent
-from .agent.metrics.ceph_cluster import CephClusterAgent
-from .agent.metrics.ceph_mon_osd import CephMonOsdAgent
-from .agent.metrics.ceph_pool import CephPoolAgent
-from .agent.metrics.db_relay import DBRelayAgent
-from .agent.metrics.sai_agent import SAIAgent
-from .agent.metrics.sai_cluster import SAICluserAgent
-from .agent.metrics.sai_disk import SAIDiskAgent
-from .agent.metrics.sai_disk_smart import SAIDiskSmartAgent
-from .agent.metrics.sai_host import SAIHostAgent
-from .common import DP_MGR_STAT_FAILED, DP_MGR_STAT_OK, DP_MGR_STAT_WARNING
-
-
-class AgentRunner(Thread):
-
-    task_name = ''
-    interval_key = ''
-    agents = []
-
-    def __init__(self, mgr_module, agent_timeout=60, call_back=None):
-        """
-        :param mgr_module: parent ceph mgr module
-        :param agent_timeout: agent execution timeout in seconds, default: 60
-        :param call_back: optional callable invoked after each agent pass
-        """
-        Thread.__init__(self)
-        self._agent_timeout = agent_timeout
-        self._module_inst = mgr_module
-        self._log = mgr_module.log
-        self._start_time = time.time()
-        self._th = None
-        self._call_back = call_back
-        self.exit = False
-        self.event = Event()
-        self.task_interval = \
-            int(self._module_inst.get_configuration(self.interval_key))
-
-    def terminate(self):
-        self.exit = True
-        self.event.set()
-        self._log.info('PDS terminate %s complete' % self.task_name)
-
-    def run(self):
-        self._start_time = time.time()
-        self._log.info(
-            'start %s, interval: %s'
-            % (self.task_name, self.task_interval))
-        while not self.exit:
-            self.run_agents()
-            if self._call_back:
-                self._call_back()
-            if self.event:
-                self.event.wait(int(self.task_interval))
-                self.event.clear()
-                self._log.info(
-                    'completed %s(%s)' % (self.task_name, time.time()-self._start_time))
-
-    def run_agents(self):
-        obj_sender = None
-        try:
-            self._log.debug('run_agents %s' % self.task_name)
-            from .common.grpcclient import GRPcClient, gen_configuration
-            conf = gen_configuration(
-                host=self._module_inst.get_configuration('diskprediction_server'),
-                user=self._module_inst.get_configuration('diskprediction_user'),
-                password=self._module_inst.get_configuration(
-                    'diskprediction_password'),
-                port=self._module_inst.get_configuration('diskprediction_port'),
-                cert_context=self._module_inst.get_configuration('diskprediction_cert_context'),
-                mgr_inst=self._module_inst,
-                ssl_target_name=self._module_inst.get_configuration('diskprediction_ssl_target_name_override'),
-                default_authority=self._module_inst.get_configuration('diskprediction_default_authority'))
-            obj_sender = GRPcClient(conf)
-            if not obj_sender:
-                self._log.error('invalid diskprediction sender')
-                self._module_inst.status = \
-                    {'status': DP_MGR_STAT_FAILED,
-                     'reason': 'invalid diskprediction sender'}
-                raise Exception('invalid diskprediction sender')
-            if obj_sender.test_connection():
-                self._module_inst.status = {'status': DP_MGR_STAT_OK}
-                self._log.debug('connection test succeeded')
-                self._run(self._module_inst, obj_sender)
-            else:
-                self._log.error('failed to test connection')
-                self._module_inst.status = \
-                    {'status': DP_MGR_STAT_FAILED,
-                     'reason': 'failed to test connection'}
-                raise Exception('failed to test connection')
-        except Exception as e:
-            self._module_inst.status = \
-                {'status': DP_MGR_STAT_FAILED,
-                 'reason': 'failed to start %s agents, %s'
-                           % (self.task_name, str(e))}
-            self._log.error(
-                'failed to start %s agents, %s' % (self.task_name, str(e)))
-            raise
-        finally:
-            if obj_sender:
-                obj_sender.close()
-
-    def is_timeout(self):
-        now = time.time()
-        return (now - self._start_time) > self._agent_timeout
-
-    def _run(self, module_inst, sender):
-        self._log.debug('%s run' % self.task_name)
-        for agent in self.agents:
-            self._start_time = time.time()
-            retry_count = 3
-            while retry_count:
-                retry_count -= 1
-                try:
-                    obj_agent = agent(module_inst, sender, self._agent_timeout)
-                    obj_agent.run()
-                    del obj_agent
-                    break
-                except Exception as e:
-                    if str(e).find('configuring') >= 0:
-                        self._log.debug(
-                            'failed to execute {}, {}, retrying'.format(
-                                agent.measurement, str(e)))
-                        time.sleep(1)
-                        continue
-                    else:
-                        module_inst.status = \
-                            {'status': DP_MGR_STAT_WARNING,
-                             'reason': 'failed to execute {}, {}'.format(
-                                agent.measurement, ';'.join(str(e).split('\n\t')))}
-                        self._log.warning(
-                            'failed to execute {}, {}'.format(
-                                agent.measurement, ';'.join(str(e).split('\n\t'))))
-                        break
-
-
-class MetricsRunner(AgentRunner):
-
-    task_name = 'Metrics Agent'
-    interval_key = 'diskprediction_upload_metrics_interval'
-    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,
-              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,
-              SAIAgent]
-
-
-class PredictRunner(AgentRunner):
-
-    task_name = 'Predictor Agent'
-    interval_key = 'diskprediction_retrieve_prediction_interval'
-    agents = [PredictAgent]
-
-
-class SmartRunner(AgentRunner):
-
-    task_name = 'Smart data Agent'
-    interval_key = 'diskprediction_upload_smart_interval'
-    agents = [SAIDiskSmartAgent]
-
-
-class TestRunner(object):
-    task_name = 'Test Agent'
-    interval_key = 'diskprediction_upload_metrics_interval'
-    agents = [CephClusterAgent, CephMonOsdAgent, CephPoolAgent,
-              SAICluserAgent, SAIDiskAgent, SAIHostAgent, DBRelayAgent,
-              SAIAgent, SAIDiskSmartAgent]
-
-    def __init__(self, mgr_module):
-        self._module_inst = mgr_module
-
-    def run(self):
-        for agent in self.agents:
-            obj_agent = agent(self._module_inst, None)
-            obj_agent.run()
-            del obj_agent
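Note: AgentRunner above combines three ideas worth keeping in mind when reading the removed code: a Thread subclass, an Event used both for interruptible sleep and for cooperative termination, and a wall-clock watchdog (is_timeout) polled by the manager's serve() loop. A standalone sketch of the same pattern; HeartbeatRunner and its work() body are hypothetical stand-ins for an agent pass:

    import time
    from threading import Event, Thread

    class HeartbeatRunner(Thread):
        def __init__(self, interval=10, timeout=60):
            Thread.__init__(self)
            self.exit = False
            self.event = Event()
            self.interval = interval
            self._timeout = timeout
            self._start_time = time.time()

        def work(self):
            print('heartbeat')  # placeholder for one agent pass

        def run(self):
            while not self.exit:
                self._start_time = time.time()
                self.work()
                self.event.wait(self.interval)  # interruptible sleep
                self.event.clear()

        def terminate(self):
            self.exit = True
            self.event.set()

        def is_timeout(self):
            # True if the last pass started more than _timeout seconds
            # ago, i.e. work() appears to be hung.
            return (time.time() - self._start_time) > self._timeout

    runner = HeartbeatRunner(interval=1)
    runner.start()
    time.sleep(3)
    runner.terminate()
    runner.join(5)

Because Python threads cannot be killed from outside, the watchdog only detects a hang; the supervisor (here, the module's serve() loop) reacts by tearing down and restarting all runners.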