git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-deploy.git/commitdiff
fix imports on top level modules
author: Alfredo Deza <alfredo.deza@inktank.com>
Tue, 24 Jun 2014 19:37:39 +0000 (15:37 -0400)
committer: Alfredo Deza <alfredo.deza@inktank.com>
Tue, 24 Jun 2014 19:37:39 +0000 (15:37 -0400)
Signed-off-by: Alfredo Deza <alfredo.deza@inktank.com>
ceph_deploy/calamari.py
ceph_deploy/connection.py
ceph_deploy/install.py
ceph_deploy/mds.py
ceph_deploy/mon.py
ceph_deploy/new.py
ceph_deploy/osd.py

index a7e0617740ab203a687e7f8bfbe3aef7c0cd3499..0c835baabfb709428ca916e8ae897847883a08a9 100644 (file)
@@ -2,7 +2,7 @@ import errno
 import logging
 import os
 from ceph_deploy import hosts, exc
-from ceph_deploy.lib.remoto import process
+from ceph_deploy.lib import remoto
 
 
 LOG = logging.getLogger(__name__)
@@ -96,12 +96,12 @@ def connect(args):
 
         # redhat/centos need to get the service started
         if distro.normalized_name in ['redhat', 'centos']:
-            process.run(
+            remoto.process.run(
                 distro.conn,
                 ['chkconfig', 'salt-minion', 'on']
             )
 
-            process.run(
+            remoto.process.run(
                 distro.conn,
                 ['service', 'salt-minion', 'start']
             )
index b88c1ac62aecf54e433eb60843f8286c743d6ef2..b06b9e34576502dc761229d19cbca8bba818d652 100644 (file)
@@ -1,6 +1,6 @@
 import getpass
 import socket
-from ceph_deploy.lib.remoto import Connection
+from ceph_deploy.lib import remoto
 
 
 def get_connection(hostname, username, logger, threads=5, use_sudo=None):
@@ -13,7 +13,7 @@ def get_connection(hostname, username, logger, threads=5, use_sudo=None):
     if username:
         hostname = "%s@%s" % (username, hostname)
     try:
-        conn = Connection(
+        conn = remoto.Connection(
             hostname,
             logger=logger,
             sudo=use_sudo,
index 2605568c256ac625dda93d8d99c3f3b51220f262..05a5e3a0052551208c5e6130f90f47a81a8ea0c0 100644 (file)
@@ -4,7 +4,7 @@ import os
 
 from ceph_deploy import hosts
 from ceph_deploy.cliutil import priority
-from ceph_deploy.lib.remoto import process, rsync
+from ceph_deploy.lib import remoto
 
 
 LOG = logging.getLogger(__name__)
@@ -67,7 +67,7 @@ def install(args):
             gpg_url = gpg_fallback
 
         if args.local_mirror:
-            rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True)
+            remoto.rsync(hostname, args.local_mirror, '/opt/ceph-deploy/repo', distro.conn.logger, sudo=True)
             repo_url = 'file:///opt/ceph-deploy/repo'
             gpg_url = 'file:///opt/ceph-deploy/repo/release.asc'
 
@@ -251,7 +251,7 @@ def purgedata(args):
 
         # Try to remove the contents of /var/lib/ceph first, don't worry
         # about errors here, we deal with them later on
-        process.check(
+        remoto.process.check(
             distro.conn,
             [
                 'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
@@ -264,7 +264,7 @@ def purgedata(args):
             rlogger.warning(
                 'OSDs may still be mounted, trying to unmount them'
             )
-            process.run(
+            remoto.process.run(
                 distro.conn,
                 [
                     'find', '/var/lib/ceph',
@@ -277,14 +277,14 @@ def purgedata(args):
 
             # And now we try again to remove the contents, since OSDs should be
             # unmounted, but this time we do check for errors
-            process.run(
+            remoto.process.run(
                 distro.conn,
                 [
                     'rm', '-rf', '--one-file-system', '--', '/var/lib/ceph',
                 ]
             )
 
-        process.run(
+        remoto.process.run(
             distro.conn,
             [
                 'rm', '-rf', '--one-file-system', '--', '/etc/ceph/',
index 3f8ba7e38e4455390fa648d7d6aaad3d144e2824..49fee9485a732eae19ec8408bc94b18f1c95d739 100644 (file)
@@ -6,7 +6,7 @@ import os
 from ceph_deploy import conf
 from ceph_deploy import exc
 from ceph_deploy import hosts
-from ceph_deploy.lib.remoto import process
+from ceph_deploy.lib import remoto
 from ceph_deploy.cliutil import priority
 
 
@@ -40,7 +40,7 @@ def create_mds(conn, name, cluster, init):
 
     keypath = os.path.join(path, 'keyring')
 
-    stdout, stderr, returncode = process.check(
+    stdout, stderr, returncode = remoto.process.check(
         conn,
         [
             'ceph',
@@ -64,7 +64,7 @@ def create_mds(conn, name, cluster, init):
         conn.logger.error('exit code from command was: %s' % returncode)
         raise RuntimeError('could not create mds')
 
-        process.check(
+        remoto.process.check(
             conn,
             [
                 'ceph',
@@ -84,7 +84,7 @@ def create_mds(conn, name, cluster, init):
     conn.remote_module.touch_file(os.path.join(path, init))
 
     if init == 'upstart':
-        process.run(
+        remoto.process.run(
             conn,
             [
                 'initctl',
@@ -96,7 +96,7 @@ def create_mds(conn, name, cluster, init):
             timeout=7
         )
     elif init == 'sysvinit':
-        process.run(
+        remoto.process.run(
             conn,
             [
                 'service',
index 404b2b37a5be716c37fe1ed8f639124cbe393064..2b6c177ea3728c30b2707be1858ec03aecc75ae1 100644 (file)
@@ -9,7 +9,7 @@ import time
 from ceph_deploy import conf, exc, admin
 from ceph_deploy.cliutil import priority
 from ceph_deploy.util import paths, net
-from ceph_deploy.lib.remoto import process
+from ceph_deploy.lib import remoto
 from ceph_deploy import hosts
 from ceph_deploy.misc import mon_hosts
 from ceph_deploy.connection import get_connection
@@ -30,7 +30,7 @@ def mon_status_check(conn, logger, hostname, args):
     """
     asok_path = paths.mon.asok(args.cluster, hostname)
 
-    out, err, code = process.check(
+    out, err, code = remoto.process.check(
         conn,
         [
             'ceph',
@@ -246,7 +246,7 @@ def destroy_mon(conn, cluster, hostname):
 
     if conn.remote_module.path_exists(path):
         # remove from cluster
-        process.run(
+        remoto.process.run(
             conn,
             [
                 'ceph',
@@ -295,7 +295,7 @@ def destroy_mon(conn, cluster, hostname):
             stamp=datetime.datetime.utcnow().strftime("%Y-%m-%dZ%H:%M:%S"),
             )
 
-        process.run(
+        remoto.process.run(
             conn,
             [
                 'mkdir',
@@ -479,7 +479,7 @@ def is_running(conn, args):
         mon.mira094: dead {"version":"0.61.5"}
         mon.mira094: not running {"version":"0.61.5"}
     """
-    stdout, stderr, _ = process.check(
+    stdout, stderr, _ = remoto.process.check(
         conn,
         args
     )
index 8e7b3b22e2d38c8be5377d0e0eb3374b942a60dc..3af1403825be3edf787d3270557821cb779caa17 100644 (file)
@@ -11,7 +11,7 @@ from ceph_deploy.cliutil import priority
 from ceph_deploy import conf, hosts, exc
 from ceph_deploy.util import arg_validators, ssh, net
 from ceph_deploy.misc import mon_hosts
-from ceph_deploy.lib.remoto import process
+from ceph_deploy.lib import remoto
 from ceph_deploy.connection import get_local_connection
 
 
@@ -43,7 +43,7 @@ def ssh_copy_keys(hostname, username=None):
     if not os.path.exists(id_rsa_file):
         LOG.info('creating a passwordless id_rsa.pub key file')
         with get_local_connection(LOG) as conn:
-            process.run(
+            remoto.process.run(
                 conn,
                 [
                     'ssh-keygen',
index 7fc405d83a3714144f7255084cdc064901819267..608084d858f1f2ede5ae420d6b957d85a4646577 100644 (file)
@@ -12,7 +12,7 @@ from cStringIO import StringIO
 from ceph_deploy import conf, exc, hosts
 from ceph_deploy.util import constants
 from ceph_deploy.cliutil import priority
-from ceph_deploy.lib.remoto import process
+from ceph_deploy.lib import remoto
 
 
 LOG = logging.getLogger(__name__)
@@ -42,7 +42,7 @@ def create_osd(conn, cluster, key):
         logger.warning('osd keyring does not exist yet, creating one')
         conn.remote_module.write_keyring(path, key)
 
-    return process.run(
+    return remoto.process.run(
         conn,
         [
             'udevadm',
@@ -79,7 +79,7 @@ def osd_tree(conn, cluster):
         '--format=json',
     ]
 
-    out, err, code = process.check(
+    out, err, code = remoto.process.check(
         conn,
         command,
     )
@@ -125,7 +125,7 @@ def osd_status_check(conn, cluster):
     ]
 
     try:
-        out, err, code = process.check(
+        out, err, code = remoto.process.check(
             conn,
             command,
         )
@@ -224,13 +224,13 @@ def prepare_disk(
     if journal is not None:
         args.append(journal)
 
-    process.run(
+    remoto.process.run(
         conn,
         args
     )
 
     if activate_prepared_disk:
-        return process.run(
+        return remoto.process.run(
             conn,
             [
                 'udevadm',
@@ -330,7 +330,7 @@ def activate(args, cfg):
         LOG.debug('activating host %s disk %s', hostname, disk)
         LOG.debug('will use init type: %s', distro.init)
 
-        process.run(
+        remoto.process.run(
             distro.conn,
             [
                 'ceph-disk-activate',
@@ -365,7 +365,7 @@ def disk_zap(args):
         # zero the device
         distro.conn.remote_module.zeroing(disk)
 
-        process.run(
+        remoto.process.run(
             distro.conn,
             [
                 'sgdisk',
@@ -390,7 +390,7 @@ def disk_list(args, cfg):
         )
 
         LOG.debug('Listing disks on {hostname}...'.format(hostname=hostname))
-        process.run(
+        remoto.process.run(
             distro.conn,
             [
                 'ceph-disk',
@@ -425,7 +425,7 @@ def osd_list(args, cfg):
         remote_module = distro.conn.remote_module
         osds = distro.conn.remote_module.listdir(constants.osd_path)
 
-        output, err, exit_code = process.check(
+        output, err, exit_code = remoto.process.check(
             distro.conn,
             [
                 'ceph-disk',