function help() {
>&2 echo "Usage : ceph-lazy [-d | -h] [command] [parameters]
-Ceph complex quering tool - Version $VERSION
+Ceph complex querying tool - Version $VERSION
OPTIONS
========
exit 1
fi
- [ $DEBUG -eq 1 ] && echoinfo "Looking for primary PGs beloging to OSD $posd"
+ [ $DEBUG -eq 1 ] && echoinfo "Looking for primary PGs belonging to OSD $posd"
$CEPH pg dump pgs --format json 2>/dev/null | $JQ --argjson posd $posd '.[] | select(.acting_primary==$posd).pgid'
}
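For reference, a hedged illustration of what that query looks like outside the script, assuming $CEPH=ceph, $JQ=jq and an example OSD id of 3 (the id is illustrative only):

  # list pgids whose acting primary is osd.3
  ceph pg dump pgs --format json 2>/dev/null | jq --argjson posd 3 '.[] | select(.acting_primary==$posd).pgid'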
#
-# Print all pgs (primay & secondary) hosted by an OSD
+# Print all pgs (primary & secondary) hosted by an OSD
#
function find_all_pg_from_osd() {
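The body of this function falls outside the hunk; a minimal sketch of such a filter, assuming the same $CEPH/$JQ variables and that each pg entry carries an "acting" array of OSD ids (an illustration only, not the script's actual implementation):

  # keep pgids whose acting set contains $posd, whether primary or replica
  $CEPH pg dump pgs --format json 2>/dev/null | $JQ --argjson posd $posd '.[] | select(.acting | index($posd) != null).pgid'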
def word_len(word):
"""
- Returns the word lenght, minus any color codes.
+ Returns the word length, minus any color codes.
"""
if word[0] == '\x1b':
return len(word) - 9
ceph rbd recover tool is used for recovering ceph rbd images when all ceph services are killed.
it is based on ceph-0.80.x (Firefly and newer)
- currently, ceph service(ceph-mon, ceph-osd) evently are not avaiable caused by bugs or sth else
+ currently, ceph services (ceph-mon, ceph-osd) may become unavailable due to bugs or other issues
, especially on large scale ceph clusters, so that the cluster cannot supply service
-and rbd images can not be accessed. In this case, a tool to recover rbd image is nessecary.
+and rbd images cannot be accessed. In this case, a tool to recover rbd images is necessary.
ceph rbd recover tool is just used for this; it can collect all objects of an image from distributed
osd nodes with the latest pg epoch, and splice the objects by offset into a complete image. To make sure
object data is complete, this tool flushes the osd journal on each osd node before recovering.
admin_node: {rbd-recover-tool common_h epoch_h metadata_h database_h}
osd: {osd_job common_h epoch_h metadata_h} #/var/rbd_tool/osd_job
in this architecture, admin_node acts as client, osds act as server.
-so, they run diffrent files:
+so, they run different files:
on admin_node run: rbd-recover-tool <action> [<parameters>]
-on osd node run: ./osd_job <funtion> <parameters>
+on osd node run: ./osd_job <function> <parameters>
admin_node will copy files: osd_job, common_h, epoch_h, metadata_h to remote osd node
if an admin_node operation fails, you can check it on the osd node
cd /var/rbd_tool/osd_job
./osd_job <operation>
-<opeartion> :
+<operation> :
do_image_id <image_id_hobject> #get image id of image format v2
do_image_id <image_header_hobject> #get image id of image format v1
do_image_metadata_v1 <image_header_hobject> #get image metadata of image format v1, maybe pg epoch is not latest
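A hypothetical sketch of how one osd node might be driven from admin_node, assuming passwordless ssh and the /var/rbd_tool/osd_job directory mentioned above (host, operation and arguments are placeholders, not the tool's real dispatch code):

  osd_host=$1; op=$2; shift 2
  # stage the helper scripts listed above on the osd node, then run one operation there
  ssh "$osd_host" mkdir -p /var/rbd_tool/osd_job
  scp osd_job common_h epoch_h metadata_h "$osd_host":/var/rbd_tool/osd_job/
  ssh "$osd_host" "cd /var/rbd_tool/osd_job && ./osd_job $op $*"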
}
# on each osd node
-# check ceph enviroment: ssh, ceph-kvstore-tool, osd_data_path
+# check ceph environment: ssh, ceph-kvstore-tool, osd_data_path
function check_ceph_env()
{
local func="check_ceph_env"
local keyword=$1
local res=`cat $omap_list| grep $keyword`
if [ "$res"x = ""x ];then
- #echo "$func: map_header_key = $keyword not exisits"
+ #echo "$func: map_header_key = $keyword does not exist"
exit
fi
echo $res|awk -F ":" '{print $2}'
local func="discover_image_snap"
echo "$func ..."
if [ $# -lt 3 ];then
- echo "$func: paramters: <pool_id> <image_name> [<snap_name>]"
+ echo "$func: parameters: <pool_id> <image_name> [<snap_name>]"
exit
fi
local pool_id=$1
fi
# speed up copy snapshot
- # lookup the coresponding head hobject of snap hobject
+ # lookup the corresponding head hobject of snap hobject
# use command: grep <offset> <head hobjects>
#
# eg.
echo "$func ..."
if [ $# -lt 3 ];then
- echo "$func: paramters: <pool_id> <image_name> <snap_name> [<backup_dir>]"
+ echo "$func: parameters: <pool_id> <image_name> <snap_name> [<backup_dir>]"
exit
fi
}
#======================================== image format v1 ========================================
-# <image_name>.rbd inlude 3 parts:
+# <image_name>.rbd includes 3 parts:
# header + snap_count*snapshot + snap_count*snap_name
#
# struct rbd_obj_header_ondisk {
before lookup & recover"
echo "$cmd_name lookup <pool_id>/<image_name>[@[<snap_name>]]
show image metadata: image format, rbd id, size, order, snapseq
- In addtion, for image with snapshots,
+ In addition, for image with snapshots,
this will list all snapshot information"
echo "$cmd_name recover <pool_id>/<image_name>[@[<snap_name>]] [</path/to/store/image>]
all snapshots share one image head, to save disk space
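Putting the help text together, a hedged end-to-end example (pool id, image name, snapshot name and destination path are illustrative; the database step is assumed to be the metadata-gathering action that must run before lookup & recover):

  rbd-recover-tool database                        # gather pg, object and image metadata from all osd nodes
  rbd-recover-tool lookup 2/myimage@mysnap         # show image format, rbd id, size, order, snapseq
  rbd-recover-tool recover 2/myimage /mnt/recover  # splice the objects back into a complete image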