ceph:
log-whitelist:
- overall HEALTH_
- - (OSD_DOWN)
- - (OSD_
+ - \(OSD_DOWN\)
+ - \(OSD_
- but it is still running
# MDS daemon 'b' is not responding, replacing it as rank 0 with standby 'a'
- is not responding
fs: xfs # this implicitly means /dev/vd? are used instead of directories
wait-for-scrub: false
log-whitelist:
- - (OSD_
- - (PG_
+ - \(OSD_
+ - \(PG_
conf:
global:
mon pg warn min per osd: 2
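
Every hunk in this sweep makes the same change: the harness applies each log-whitelist entry as a regular expression against cluster-log lines, so the parentheses around health codes need to be escaped to match the literal (OSD_DOWN)-style markers that health checks emit. A minimal sketch of the difference, assuming the entries are matched with something like Python's re.search and using an illustrative (not captured) log line:

    import re

    # Illustrative cluster-log line; the wording is an assumption, not
    # copied from a real run.
    line = "cluster [WRN] Health check failed: 1 osds down (OSD_DOWN)"

    # Unescaped entry: the parens are a regex group, so this matches any
    # line containing the bare text OSD_DOWN, with or without parentheses.
    print(bool(re.search(r"(OSD_DOWN)", line)))    # True

    # Escaped entry: matches only the literal "(OSD_DOWN)" marker.
    print(bool(re.search(r"\(OSD_DOWN\)", line)))  # True

    # Prefix entries only work when escaped: "(OSD_" alone is not a valid
    # pattern (unterminated group), while "\(OSD_" matches the literal
    # opening paren of any OSD_* health code.
    try:
        re.compile(r"(OSD_")
    except re.error as err:
        print("invalid pattern:", err)
    print(bool(re.search(r"\(OSD_", line)))        # True
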
ceph:
log-whitelist:
- overall HEALTH_
- - (FS_DEGRADED)
- - (MDS_FAILED)
- - (MDS_DEGRADED)
- - (FS_WITH_FAILED_MDS)
- - (MDS_DAMAGE)
+ - \(FS_DEGRADED\)
+ - \(MDS_FAILED\)
+ - \(MDS_DEGRADED\)
+ - \(FS_WITH_FAILED_MDS\)
+ - \(MDS_DAMAGE\)
ceph:
log-whitelist:
- overall HEALTH_
- - (FS_DEGRADED)
- - (MDS_FAILED)
- - (MDS_DEGRADED)
- - (FS_WITH_FAILED_MDS)
+ - \(FS_DEGRADED\)
+ - \(MDS_FAILED\)
+ - \(MDS_DEGRADED\)
+ - \(FS_WITH_FAILED_MDS\)
ceph:
log-whitelist:
- reached quota
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- ceph-fuse:
- workunit:
overrides:
ceph:
log-whitelist:
- - (MDS_TRIM)
+ - \(MDS_TRIM\)
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (PG_
- - (OSD_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
- - (TOO_FEW_PGS)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(TOO_FEW_PGS\)
tasks:
- workunit:
clients:
log-whitelist:
- but it is still running
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- workunit:
clients:
log-whitelist:
- but it is still running
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- workunit:
clients:
log-whitelist:
- but it is still running
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- workunit:
clients:
wait-for-scrub: false
log-whitelist:
- overall HEALTH_
- - (MGR_DOWN)
- - (PG_
+ - \(MGR_DOWN\)
+ - \(PG_
- cephfs_test_runner:
modules:
- tasks.mgr.test_failover
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
- - (TOO_FEW_PGS)
+ - \(MON_DOWN\)
+ - \(TOO_FEW_PGS\)
tasks:
- mon_thrash:
revive_delay: 90
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
+ - \(MON_DOWN\)
conf:
osd:
mon client ping interval: 4
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
+ - \(MON_DOWN\)
tasks:
- mon_thrash:
revive_delay: 20
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
+ - \(MON_DOWN\)
conf:
mon:
paxos min: 10
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
+ - \(MON_DOWN\)
conf:
mon:
paxos min: 10
log-whitelist:
- slow request
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- exec:
client.0:
ceph:
log-whitelist:
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- exec:
client.0:
log-whitelist:
- reached quota
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
- - (POOL_FULL)
- - (REQUEST_SLOW)
- - (MON_DOWN)
- - (PG_
- - (POOL_APP_NOT_ENABLED)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(REQUEST_SLOW\)
+ - \(MON_DOWN\)
+ - \(PG_
+ - \(POOL_APP_NOT_ENABLED\)
conf:
global:
debug objecter: 20
log-whitelist:
- but it is still running
- overall HEALTH_
- - (PG_
- - (MON_DOWN)
+ - \(PG_
+ - \(MON_DOWN\)
tasks:
- workunit:
clients:
- ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
+ - \(MON_DOWN\)
- mon_recovery:
osd max object namespace len: 64
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (TOO_FEW_PGS)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(TOO_FEW_PGS\)
- ceph_objectstore_tool:
objects: 20
- ceph:
log-whitelist:
- overall HEALTH_
- - (MGR_DOWN)
+ - \(MGR_DOWN\)
- exec:
mon.a:
- ceph restful create-key admin
- ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
conf:
global:
osd max object name len: 460
- ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
conf:
global:
osd max object name len: 460
log-whitelist:
- is full
- overall HEALTH_
- - (POOL_FULL)
- - (POOL_NEAR_FULL)
- - (CACHE_POOL_NO_HIT_SET)
+ - \(POOL_FULL\)
+ - \(POOL_NEAR_FULL\)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- install:
- ceph:
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
- workunit:
clients:
all:
- ceph:
log-whitelist:
- overall HEALTH_
- - (PG_
- - (OSD_
- - (OBJECT_
+ - \(PG_
+ - \(OSD_
+ - \(OBJECT_
conf:
osd:
osd debug reject backfill probability: .3
ceph:
log-whitelist:
- overall HEALTH_
- - (PG_
+ - \(PG_
conf:
global:
osd heartbeat grace: 40
ceph:
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
conf:
osd:
debug osd: 5
ceph:
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
conf:
osd:
debug osd: 5
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
- dump_stuck:
log-whitelist:
- objects unfound and apparently lost
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
- ec_lost_unfound:
log-whitelist:
- objects unfound and apparently lost
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
- rep_lost_unfound_delete:
log-whitelist:
- objects unfound and apparently lost
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
- lost_unfound:
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
conf:
osd:
osd min pg log entries: 5
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_
conf:
osd:
osd min pg log entries: 5
log-whitelist:
- but it is still running
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
conf:
osd:
osd min pg log entries: 5
log-whitelist:
- objects unfound and apparently lost
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
- peer:
- had wrong cluster addr
- reached quota
- overall HEALTH_
- - (POOL_FULL)
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
- workunit:
clients:
all:
log-whitelist:
- no reply from
- overall HEALTH_
- - (MON_DOWN)
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
- full_sequential:
- radosbench:
clients: [client.0]
debug osd: 5
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (SMALLER_PGP_NUM)
- - (OBJECT_
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
tasks:
- install:
- ceph:
fs: xfs
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
- resolve_stuck_peering:
- but it is still running
- had wrong client addr
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
conf:
client.rest0:
debug ms: 1
- ceph:
log-whitelist:
- overall HEALTH_
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
- workunit:
clients:
all:
log-whitelist:
- objects unfound and apparently lost
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (OBJECT_DEGRADED)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(OBJECT_DEGRADED\)
- watch_notify_same_primary:
clients: [client.0]
# setting luminous triggers peering, which *might* trigger health alerts
log-whitelist:
- overall HEALTH_
- - (PG_AVAILABILITY)
- - (PG_DEGRADED)
+ - \(PG_AVAILABILITY\)
+ - \(PG_DEGRADED\)
thrashosds:
chance_thrash_cluster_full: 0
ceph:
log-whitelist:
- reached quota
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
crush_tunables: hammer
conf:
client:
ceph:
log-whitelist:
- overall HEALTH_
- - (MON_DOWN)
- - (OSDMAP_FLAGS)
- - (SMALLER_PGP_NUM)
- - (POOL_APP_NOT_ENABLED)
+ - \(MON_DOWN\)
+ - \(OSDMAP_FLAGS\)
+ - \(SMALLER_PGP_NUM\)
+ - \(POOL_APP_NOT_ENABLED\)
tasks:
- mon_recovery:
log-whitelist:
- reached quota
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
- - (POOL_FULL)
- - (SMALLER_PGP_NUM)
- - (REQUEST_SLOW)
- - (CACHE_POOL_NEAR_FULL)
- - (POOL_APP_NOT_ENABLED)
+ - \(CACHE_POOL_NO_HIT_SET\)
+ - \(POOL_FULL\)
+ - \(SMALLER_PGP_NUM\)
+ - \(REQUEST_SLOW\)
+ - \(CACHE_POOL_NEAR_FULL\)
+ - \(POOL_APP_NOT_ENABLED\)
conf:
client:
debug ms: 1
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
overrides:
ceph:
log-whitelist:
- - (REQUEST_SLOW)
+ - \(REQUEST_SLOW\)
tasks:
- workunit:
clients:
fs: xfs
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
conf:
client:
rbd default data pool: datapool
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- exec:
client.0:
fs: xfs
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
- workunit:
clients:
all: [rbd/test_rbd_mirror.sh]
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
conf:
client:
rbd clone copy on read: true
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- overall HEALTH_
- - (CACHE_POOL_NO_HIT_SET)
+ - \(CACHE_POOL_NO_HIT_SET\)
tasks:
- workunit:
clients:
ceph:
log-whitelist:
- reached quota
- - (POOL_APP_NOT_ENABLED)
+ - \(POOL_APP_NOT_ENABLED\)
conf:
global:
ms inject delay max: 1
add_osds_to_crush: true
log-whitelist:
- overall HEALTH_
- - (FS_
- - (MDS_
+ - \(FS_
+ - \(MDS_
- print: "**** done ceph"
- install.upgrade:
mon.a:
ceph:
log-whitelist:
- overall HEALTH_
- - (OSDMAP_FLAGS)
- - (OSD_
- - (PG_
- - (POOL_
- - (CACHE_POOL_
- - (SMALLER_PGP_NUM)
- - (OBJECT_
- - (REQUEST_SLOW)
- - (TOO_FEW_PGS)
+ - \(OSDMAP_FLAGS\)
+ - \(OSD_
+ - \(PG_
+ - \(POOL_
+ - \(CACHE_POOL_
+ - \(SMALLER_PGP_NUM\)
+ - \(OBJECT_
+ - \(REQUEST_SLOW\)
+ - \(TOO_FEW_PGS\)
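
The change is mechanical across the suite tree, so a sweep like this is more easily regenerated than hand-edited. A hypothetical helper that would produce equivalent edits; the qa/suites glob, the entry pattern, and the absence of a check that a matched line actually sits under a log-whitelist key are all assumptions of this sketch, not part of the change itself:

    import pathlib
    import re

    # Matches whitelist entries such as "    - (OSD_DOWN)" or the prefix
    # form "    - (OSD_"; group 3 captures the optional closing paren.
    ENTRY = re.compile(r"^(\s*- )\((\w+)(\))?$")

    def escape_parens(line: str) -> str:
        # "  - (OSD_DOWN)" -> "  - \(OSD_DOWN\)"; "  - (OSD_" -> "  - \(OSD_"
        m = ENTRY.match(line)
        if not m:
            return line
        indent, code, closed = m.group(1), m.group(2), m.group(3)
        return indent + r"\(" + code + (r"\)" if closed else "")

    def rewrite(path: pathlib.Path) -> None:
        lines = path.read_text().splitlines()
        path.write_text("\n".join(escape_parens(l) for l in lines) + "\n")

    if __name__ == "__main__":
        # qa/suites is an assumed location for the teuthology suite YAMLs.
        for yml in pathlib.Path("qa/suites").rglob("*.yaml"):
            rewrite(yml)
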