git-server-git.apps.pok.os.sepia.ceph.com Git - ceph-ansible.git/commitdiff
Common: Restore check_socket 1455/head
author: Guillaume Abrioux <gabrioux@redhat.com>
Tue, 18 Apr 2017 10:40:43 +0000 (12:40 +0200)
committer: leseb <seb@redhat.com>
Mon, 24 Apr 2017 10:31:49 +0000 (12:31 +0200)
Restore the check_socket that was removed by `5bec62b`.
This commit also improves the logging in `restart_*_daemon.sh` scripts

Signed-off-by: Guillaume Abrioux <gabrioux@redhat.com>
roles/ceph-common/handlers/main.yml
roles/ceph-common/tasks/checks/check_socket.yml [new file with mode: 0644]
roles/ceph-common/tasks/main.yml
roles/ceph-common/templates/restart_mon_daemon.sh.j2
roles/ceph-common/templates/restart_osd_daemon.sh.j2
tests/functional/centos/7/journal-collocation/group_vars/all

index 9602a0ce54036c13d634192a7e02b90df3e49c54..1fc4a0ea410de5cc453b44274c6227b486d201e4 100644 (file)
@@ -19,6 +19,9 @@
     listen: "restart ceph mons"
 
   when:
+# We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - socket.rc == 0
+    - ceph_current_fsid.rc == 0
     - mon_group_name in group_names
 
 # This does not just restart OSDs but everything else too. Unfortunately
   - name: restart ceph osds daemon(s)
     command: /tmp/restart_osd_daemon.sh
     listen: "restart ceph osds"
-    when:
-      - handler_health_osd_check
+    when: handler_health_osd_check
+
   when:
+# We do not want to run these checks on initial deployment (`socket.rc == 0`)
+    - socket.rc == 0
+    - ceph_current_fsid.rc == 0
     - osd_group_name in group_names
+# See https://github.com/ceph/ceph-ansible/issues/1457 for the condition below
+    - item in play_hosts
 
 - name: restart ceph mdss
   service:
diff --git a/roles/ceph-common/tasks/checks/check_socket.yml b/roles/ceph-common/tasks/checks/check_socket.yml
new file mode 100644 (file)
index 0000000..79b512c
--- /dev/null
@@ -0,0 +1,15 @@
+---
+# These checks are used to avoid running handlers at initial deployment.
+- name: check for a ceph socket
+  shell: "stat /var/run/ceph/*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socket
+
+- name: check for a rados gateway socket
+  shell: "stat {{ rbd_client_admin_socket_path }}*.asok > /dev/null 2>&1"
+  changed_when: false
+  failed_when: false
+  always_run: true
+  register: socketrgw
index ca25812f7d4446fac4e3e7654d99921e33993310..daf8e3ff97b6bcf50f7712631da050a935597b2d 100644 (file)
@@ -87,6 +87,7 @@
   static: False
 
 - include: facts.yml
+- include: ./checks/check_socket.yml
 - include: create_ceph_initial_dirs.yml
 - include: generate_cluster_fsid.yml
 - include: generate_ceph_conf.yml
index d918b01988c88b95be7f438b5dfa946d1075d026..4424ccacb0ba45b5baf3bde4255ee582083c0703 100644 (file)
@@ -15,6 +15,9 @@ while [ $RETRIES -ne 0 ]; do
   let RETRIES=RETRIES-1
 done
 # If we reach this point, it means there is a problem with the quorum
+echo "Error with quorum."
+echo "cluster status:"
+ceph --cluster ${CLUSTER} -s
 exit 1
 }
 
@@ -29,5 +32,5 @@ while [ $COUNT -ne 0 ]; do
   let COUNT=COUNT-1
 done
 # If we reach this point, it means the socket is not present.
-echo "Error while restarting mon daemon"
+echo "Socket file ${SOCKET} could not be found, which means the monitor is not running."
 exit 1
index 8b0b7d1de3f761ec0792c6bb0aae68c708937052..0f841116143881d40e5ea6f9cf0a198563e151dc 100644 (file)
@@ -17,7 +17,6 @@ check_pgs() {
   exit 1
 }
 
-
 for id in $(ls /var/lib/ceph/osd/ | sed 's/.*-//'); do
   # First, restart daemon(s)
   systemctl restart ceph-osd@${id}
@@ -31,6 +30,6 @@ for id in $(ls /var/lib/ceph/osd/ | sed 's/.*-//'); do
     let COUNT=COUNT-1
   done
   # If we reach this point, it means the socket is not present.
-  echo "Error while restarting mon daemon"
+  echo "Socket file ${SOCKET} could not be found, which means the osd daemon is not running."
   exit 1
 done
index a2f0b06dcee7bc4a0e38b9840c564e2cafae48aa..81857e670f85b600d74a05b276abc72ea9fa8af1 100644 (file)
@@ -12,7 +12,3 @@ journal_collocation: True
 os_tuning_params:
   - { name: kernel.pid_max, value: 4194303 }
   - { name: fs.file-max, value: 26234859 }
-ceph_conf_overrides:
-  global:
-    osd_pool_default_pg_num: 8
-    osd_pool_default_size: 1