From 92f9be963b69e3b9c0afba4ca3291331aeda1679 Mon Sep 17 00:00:00 2001 From: =?utf8?q?S=C3=A9bastien=20Han?= Date: Wed, 13 Sep 2017 15:46:29 -0600 Subject: [PATCH] rolling_update: clarify update doc MIME-Version: 1.0 Content-Type: text/plain; charset=utf8 Content-Transfer-Encoding: 8bit Closes: https://bugzilla.redhat.com/show_bug.cgi?id=1490188 Signed-off-by: Sébastien Han --- infrastructure-playbooks/rolling_update.yml | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/infrastructure-playbooks/rolling_update.yml b/infrastructure-playbooks/rolling_update.yml index e569639b4..308a188ef 100644 --- a/infrastructure-playbooks/rolling_update.yml +++ b/infrastructure-playbooks/rolling_update.yml @@ -6,12 +6,13 @@ # upgraded one by one. It is really crucial for the update process to happen # in a serialized fashion. DO NOT CHANGE THIS VALUE. # -# The four roles that apply to the ceph hosts will be applied: ceph-common, -# ceph-mon, ceph-osd and ceph-mds. So any changes to configuration, package updates, etc, -# will be applied as part of the rolling update process. # - -# /!\ DO NOT FORGET TO CHANGE THE RELEASE VERSION FIRST! /!\ +# If you run a Ceph community version, you have to change the variable ceph_stable_release to the new release +# +# If you run Red Hat Ceph Storage and are doing a **major** update (e.g.: from 2 to 3), you have two options: +# - if you use a CDN, you have to change the ceph_rhcs_version to a newer one +# - if you use an ISO, you have to change the ceph_rhcs_iso_path to the directory containing the new Ceph version +# - name: confirm whether user really meant to upgrade the cluster hosts: localhost -- 2.39.5