}
+# Verify that trash purge schedules created for many namespaces are
+# automatically staggered: with schedules on an interval of M minutes, the
+# scheduler should spread the next run times across M distinct one-minute
+# slots, and keep them spread as schedules are added and removed.  Schedules
+# created with an explicit start time must instead share a single slot.
+test_trash_purge_schedule_staggering() {
+    echo "Testing trash purge schedule staggering..."
+    remove_images
+    ceph osd pool create rbd2 8
+    rbd pool init rbd2
+
+    # Initial empty check
+    test "$(ceph rbd trash purge schedule list)" = "{}"
+    ceph rbd trash purge schedule status | fgrep '"scheduled": []'
+
+    # Create 50 namespaces
+    for i in {1..50}; do
+        rbd namespace create "rbd2/test$i"
+    done
+
+    # Helper to get status JSON and verify all namespaces are scheduled.
+    #   $1 - expected number of scheduled namespaces
+    #   $2 - name of the caller's variable (nameref) that receives the
+    #        `schedule status` JSON
+    # Returns non-zero if the schedule list size, or the set of namespaces
+    # reported by `schedule status`, does not match expectations.
+    get_trash_purge_schedule_status() {
+        local num_scheduled=$1
+        local -n status_ref=$2
+
+        local list_json
+        list_json=$(rbd trash purge schedule ls -p rbd2 -R --format json)
+
+        local list_namespaces=()
+        mapfile -t list_namespaces < <(
+            jq -r 'sort_by(.namespace) | .[].namespace' <<< "$list_json"
+        )
+        [ "${#list_namespaces[@]}" -eq "$num_scheduled" ] || return 1
+
+        # Poll status until it has all scheduled namespaces (up to ~2 min;
+        # presumably the module picks up new schedules asynchronously)
+        for ((j=0; j<12; j++)); do
+            status_ref=$(rbd trash purge schedule status -p rbd2 --format json)
+            [ "$(jq 'length' <<< "$status_ref")" -eq "${#list_namespaces[@]}" ] && break
+            sleep 10
+        done
+
+        # Element-wise comparison of the sorted namespace lists; this also
+        # fails if the poll above timed out with a shorter status list
+        local status_namespaces=()
+        mapfile -t status_namespaces < <(
+            jq -r 'sort_by(.namespace) | .[].namespace' <<< "$status_ref"
+        )
+        for i in "${!list_namespaces[@]}"; do
+            [[ "${list_namespaces[i]}" != "${status_namespaces[i]}" ]] && return 1;
+        done
+        return 0
+    }
+
+    # Helper to check staggering of schedules.
+    #   $1 - `schedule status` JSON
+    #   $2 - schedule interval in minutes
+    are_trash_purge_schedules_staggered() {
+        local status_json=$1
+        local interval_min=$2
+        local unique_times=()
+        mapfile -t unique_times < <(jq -r '.[].schedule_time' <<< "$status_json" | sort -u)
+
+        # Expect one unique time slot per interval minute (1-minute scheduler granularity)
+        [ "${#unique_times[@]}" -eq "$interval_min" ] || return 1
+
+        # Check that consecutive schedule times are exactly 1 minute apart
+        # (compare whole minutes since the epoch; `sort -u` above keeps the
+        # timestamps in ascending order)
+        local prev_epoch=$(( $(date -d "${unique_times[0]}" +%s)/60 ))
+        for ((i=1;i<${#unique_times[@]};i++)); do
+            local curr=$(( $(date -d "${unique_times[i]}" +%s)/60 ))
+            [ $((curr - prev_epoch)) -eq 1 ] || return 1
+            prev_epoch=$curr
+        done
+        return 0
+    }
+
+    # Verify that `schedule add/rm` maintains proper staggering
+    local interval_min=5
+    local status_json
+    local num_scheduled_namespaces=40
+    # Schedule namespaces test1..test40
+    for ((i=1; i<=40; i++)); do
+        rbd trash purge schedule add -p "rbd2/test$i" "${interval_min}m"
+    done
+    get_trash_purge_schedule_status "$num_scheduled_namespaces" status_json
+    are_trash_purge_schedules_staggered "$status_json" "$interval_min"
+
+    # Shift scheduling range to test6..test45
+    for ((i=41; i<=45; i++)); do
+        rbd trash purge schedule add -p "rbd2/test$i" "${interval_min}m"
+    done
+    for ((i=1; i<=5; i++)); do
+        rbd trash purge schedule rm -p "rbd2/test$i"
+    done
+    get_trash_purge_schedule_status "$num_scheduled_namespaces" status_json
+    are_trash_purge_schedules_staggered "$status_json" "$interval_min"
+
+    # Shift scheduling range to test11..test50
+    for ((i=46; i<=50; i++)); do
+        rbd trash purge schedule add -p "rbd2/test$i" "${interval_min}m"
+    done
+    for ((i=6; i<=10; i++)); do
+        rbd trash purge schedule rm -p "rbd2/test$i"
+    done
+    get_trash_purge_schedule_status "$num_scheduled_namespaces" status_json
+    are_trash_purge_schedules_staggered "$status_json" "$interval_min"
+
+    # Add schedules for test1..test10 with explicit start time.
+    # These should all share the same next schedule_time.
+    num_scheduled_namespaces=50
+    for ((i=1; i<=10; i++)); do
+        rbd trash purge schedule add -p "rbd2/test$i" "${interval_min}m" 2020-01-01
+    done
+
+    # Get updated status
+    get_trash_purge_schedule_status "$num_scheduled_namespaces" status_json
+
+    # Split status into two sets:
+    #   test1..test10 (explicit start-time)
+    #   test11..test50 (should remain staggered)
+    local anchored_times=()
+    local staggered_json
+
+    # Extract schedule times for namespaces test1..test10
+    # (the regex matches exactly test1-test9 and test10, nothing else)
+    mapfile -t anchored_times < <(
+        jq -r '.[]
+               | select(.namespace | test("^test([1-9]|10)$"))
+               | .schedule_time' <<< "$status_json" | sort -u
+    )
+
+    # All anchored schedules should share exactly one schedule_time
+    [ "${#anchored_times[@]}" -eq 1 ] || return 1
+
+    # Extract JSON only for namespaces test11..test50
+    staggered_json=$(jq '
+        map(select(.namespace | test("^test(1[1-9]|[2-4][0-9]|50)$")))
+    ' <<< "$status_json")
+
+    # Verify these remain properly staggered
+    are_trash_purge_schedules_staggered "$staggered_json" "$interval_min"
+
+    # Cleanup: remove all schedules
+    for ((i=1; i<=50; i++)); do
+        rbd trash purge schedule rm -p "rbd2/test$i"
+    done
+
+    # Wait until schedule status becomes empty (up to ~1 min)
+    local empty_status
+    for ((j=0; j<12; j++)); do
+        empty_status=$(rbd trash purge schedule status -p rbd2 --format json)
+        [ "$(jq 'length' <<< "$empty_status")" -eq 0 ] && break
+        sleep 5
+    done
+
+    [ "$(jq 'length' <<< "$empty_status")" -eq 0 ] || {
+        echo "Error: trash purge schedule status not empty after removals"
+        return 1
+    }
+
+    # Remove namespaces
+    for ((i=1; i<=50; i++)); do
+        rbd namespace rm "rbd2/test$i"
+    done
+
+    ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
test_mirror_snapshot_schedule() {
echo "testing mirror snapshot schedule..."
remove_images
ceph osd pool rm rbd3 rbd3 --yes-i-really-really-mean-it
}
+# Verify that mirror snapshot schedules created for many images are
+# automatically staggered: with schedules on an interval of M minutes, the
+# scheduler should spread the next run times across M distinct one-minute
+# slots, and keep them spread as schedules are added and removed.  Schedules
+# created with an explicit start time must instead share a single slot.
+test_mirror_snapshot_schedule_staggering() {
+    echo "Testing mirror snapshot schedule staggering..."
+
+    remove_images
+    ceph osd pool create rbd2 8
+    rbd pool init rbd2
+    rbd mirror pool enable rbd2 image
+    rbd mirror pool peer add rbd2 cluster1
+
+    # Initial empty check
+    test "$(ceph rbd mirror snapshot schedule list)" = "{}"
+    ceph rbd mirror snapshot schedule status | fgrep '"scheduled_images": []'
+
+    # Create 50 images
+    for i in {1..50}; do
+        rbd create $RBD_CREATE_ARGS -s 1 "rbd2/test$i"
+        rbd mirror image enable "rbd2/test$i" snapshot
+    done
+
+    # Helper to get status JSON and verify all images are scheduled.
+    #   $1 - expected number of scheduled images
+    #   $2 - name of the caller's variable (nameref) that receives the
+    #        `schedule status` JSON
+    # Returns non-zero if the schedule list size, or the set of images
+    # reported by `schedule status`, does not match expectations.
+    get_mirror_snapshot_schedule_status() {
+        local num_scheduled=$1
+        local -n status_ref=$2
+
+        local list_json
+        list_json=$(rbd mirror snapshot schedule ls -p rbd2 -R --format json)
+
+        local list_images=()
+        mapfile -t list_images < <(
+            jq -r 'sort_by(.image) | .[].image' <<< "$list_json"
+        )
+        [ "${#list_images[@]}" -eq "$num_scheduled" ] || return 1
+
+        # Poll status until it has all scheduled images (up to ~2 min;
+        # presumably the module picks up new schedules asynchronously)
+        for ((j=0;j<12;j++)); do
+            status_ref=$(rbd mirror snapshot schedule status -p rbd2 --format json)
+            [ "$(jq 'length' <<< "$status_ref")" -eq "${#list_images[@]}" ] && break
+            sleep 10
+        done
+
+        # Status reports pool-qualified image names (see the `^rbd2/test`
+        # regex below); strip the prefix with split("/")[-1] before the
+        # element-wise comparison against the plain schedule-list names.
+        # This also fails if the poll above timed out with a shorter list.
+        local status_images=()
+        mapfile -t status_images < <(
+            jq -r 'sort_by(.image) | .[].image | split("/")[-1]' <<< "$status_ref"
+        )
+        for i in "${!list_images[@]}"; do
+            [[ "${list_images[i]}" != "${status_images[i]}" ]] && return 1;
+        done
+        return 0
+    }
+
+    # Helper to check staggering of schedules.
+    #   $1 - `schedule status` JSON
+    #   $2 - schedule interval in minutes
+    are_mirror_snapshot_schedules_staggered() {
+        local status_json=$1 interval_min=$2
+        local unique_times=()
+        mapfile -t unique_times < <(
+            jq -r '.[].schedule_time' <<< "$status_json" | sort -u
+        )
+        # Expect one unique time slot per interval minute (1-minute scheduler granularity)
+        [ "${#unique_times[@]}" -eq "$interval_min" ] || return 1
+
+        # Check that consecutive schedule times are exactly 1 minute apart
+        # (compare whole minutes since the epoch; `sort -u` above keeps the
+        # timestamps in ascending order)
+        local prev_epoch=$(( $(date -d "${unique_times[0]}" +%s)/60 ))
+        for ((i=1;i<${#unique_times[@]};i++)); do
+            local curr=$(( $(date -d "${unique_times[i]}" +%s)/60 ))
+            [ $((curr - prev_epoch)) -eq 1 ] || return 1
+            prev_epoch=$curr
+        done
+        return 0
+    }
+
+    # Verify that `schedule add/rm` maintains proper staggering
+    local interval_min=5
+    local status_json
+    local num_scheduled_images=40
+    # Schedule images test1..test40
+    for ((i=1; i<=40; i++)); do
+        rbd mirror snapshot schedule add -p rbd2 --image "test$i" "${interval_min}m"
+    done
+    get_mirror_snapshot_schedule_status "$num_scheduled_images" status_json
+    are_mirror_snapshot_schedules_staggered "$status_json" "$interval_min"
+
+    # Shift scheduling range to test6..test45
+    for ((i=41; i<=45; i++)); do
+        rbd mirror snapshot schedule add -p rbd2 --image "test$i" "${interval_min}m"
+    done
+    for ((i=1; i<=5; i++)); do
+        rbd mirror snapshot schedule rm -p rbd2 --image "test$i"
+    done
+    get_mirror_snapshot_schedule_status "$num_scheduled_images" status_json
+    are_mirror_snapshot_schedules_staggered "$status_json" "$interval_min"
+
+    # Shift scheduling range to test11..test50
+    for ((i=46; i<=50; i++)); do
+        rbd mirror snapshot schedule add -p rbd2 --image "test$i" "${interval_min}m"
+    done
+    for ((i=6; i<=10; i++)); do
+        rbd mirror snapshot schedule rm -p rbd2 --image "test$i"
+    done
+    get_mirror_snapshot_schedule_status "$num_scheduled_images" status_json
+    are_mirror_snapshot_schedules_staggered "$status_json" "$interval_min"
+
+    # Add schedules for test1..test10 with explicit start time.
+    # These should all share the same next schedule_time.
+    num_scheduled_images=50
+    for ((i=1; i<=10; i++)); do
+        rbd mirror snapshot schedule add -p rbd2 --image "test$i" "${interval_min}m" 2020-01-01
+    done
+
+    # Get updated status
+    get_mirror_snapshot_schedule_status "$num_scheduled_images" status_json
+
+    # Split status into two sets:
+    #   test1..test10 (explicit start-time)
+    #   test11..test50 (should remain staggered)
+    local anchored_times=()
+    local staggered_json
+
+    # Extract schedule times for images test1..test10 (status image names
+    # are pool-qualified; the regex matches exactly rbd2/test1-rbd2/test10)
+    mapfile -t anchored_times < <(
+        jq -r '.[]
+               | select(.image | test("^rbd2/test([1-9]|10)$"))
+               | .schedule_time' <<< "$status_json" | sort -u
+    )
+
+    # All anchored schedules should share exactly one schedule_time
+    [ "${#anchored_times[@]}" -eq 1 ] || return 1
+
+    # Extract JSON only for images test11..test50
+    staggered_json=$(jq '
+        map(select(.image | test("^rbd2/test(1[1-9]|[2-4][0-9]|50)$")))
+    ' <<< "$status_json")
+
+    # Verify these remain properly staggered
+    are_mirror_snapshot_schedules_staggered "$staggered_json" "$interval_min"
+
+    # Cleanup: remove all schedules
+    for ((i=1; i<=50; i++)); do
+        rbd mirror snapshot schedule rm -p rbd2 --image "test$i"
+    done
+
+    # Wait until schedule status becomes empty (up to ~1 min)
+    local empty_status
+    for ((j=0; j<12; j++)); do
+        empty_status=$(rbd mirror snapshot schedule status -p rbd2 --format json)
+        [ "$(jq 'length' <<< "$empty_status")" -eq 0 ] && break
+        sleep 5
+    done
+    [ "$(jq 'length' <<< "$empty_status")" -eq 0 ] || {
+        echo "Error: snapshot schedule status not empty after removals"
+        return 1
+    }
+
+    # Remove images
+    for ((i=1; i<=50; i++)); do
+        rbd rm "rbd2/test$i"
+    done
+
+    ceph osd pool rm rbd2 rbd2 --yes-i-really-really-mean-it
+}
+
test_perf_image_iostat() {
echo "testing perf image iostat..."
remove_images
test_namespace
test_trash_purge_schedule
test_trash_purge_schedule_recovery
+test_trash_purge_schedule_staggering
test_mirror_snapshot_schedule
test_mirror_snapshot_schedule_recovery
+test_mirror_snapshot_schedule_staggering
test_perf_image_iostat
test_perf_image_iostat_recovery
test_mirror_pool_peer_bootstrap_create