from cephadmlib.locking import FileLock
from cephadmlib.daemon_identity import DaemonIdentity, DaemonSubIdentity
from cephadmlib.packagers import create_packager, Packager
-from cephadmlib.logging import cephadm_init_logging, Highlight, LogDestination
+from cephadmlib.logging import (
+ cephadm_init_logging,
+ Highlight,
+ LogDestination,
+ write_cluster_logrotate_config,
+)
from cephadmlib.systemd import check_unit, check_units
from cephadmlib.container_types import (
CephContainer,
if os.path.exists(ctx.logrotate_dir + f'/ceph-{fsid}'):
return
- # logrotate for the cluster
- with write_new(ctx.logrotate_dir + f'/ceph-{fsid}', perms=None) as f:
- """
- This is a bit sloppy in that the killall/pkill will touch all ceph daemons
- in all containers, but I don't see an elegant way to send SIGHUP *just* to
- the daemons for this cluster. (1) systemd kill -s will get the signal to
- podman, but podman will exit. (2) podman kill will get the signal to the
- first child (bash), but that isn't the ceph daemon. This is simpler and
- should be harmless.
- """
- targets: List[str] = [
- 'ceph-mon',
- 'ceph-mgr',
- 'ceph-mds',
- 'ceph-osd',
- 'ceph-fuse',
- 'radosgw',
- 'rbd-mirror',
- 'cephfs-mirror',
- 'tcmu-runner'
- ]
-
- f.write("""# created by cephadm
-/var/log/ceph/%s/*.log {
- rotate 7
- daily
- compress
- sharedscripts
- postrotate
- killall -q -1 %s || pkill -1 -x '%s' || true
- endscript
- missingok
- notifempty
- su root root
-}
-""" % (fsid, ' '.join(targets), '|'.join(targets)))
+ write_cluster_logrotate_config(ctx, fsid)
def get_unit_file(ctx: CephadmContext, fsid: str) -> str:
from .context import CephadmContext
from .constants import QUIET_LOG_LEVEL, LOG_DIR
+from cephadmlib.file_utils import write_new
+
+from cephadmlib import templating
+
class _ExcludeErrorsFilter(logging.Filter):
def filter(self, record: logging.LogRecord) -> bool:
}
-_logrotate_data = """# created by cephadm
-/var/log/ceph/cephadm.log {
- rotate 7
- daily
- compress
- missingok
- notifempty
- su root root
-}
-"""
-
-
_VERBOSE_HANDLERS = [
'console',
'console_stdout',
logger.setLevel(QUIET_LOG_LEVEL)
- if not os.path.exists(ctx.logrotate_dir + '/cephadm'):
- with open(ctx.logrotate_dir + '/cephadm', 'w') as f:
- f.write(_logrotate_data)
+ write_cephadm_logrotate_config(ctx)
for handler in logger.handlers:
# the following little hack ensures that no matter how cephadm is named
if ctx.verbose and handler.name in _VERBOSE_HANDLERS:
handler.setLevel(QUIET_LOG_LEVEL)
logger.debug('%s\ncephadm %s' % ('-' * 80, args))
+
+
+def write_cephadm_logrotate_config(ctx: CephadmContext) -> None:
+    """Create the logrotate config for cephadm.log, rendered from the
+    cephadm.logrotate.config.j2 template, at <logrotate_dir>/cephadm.
+
+    Does nothing if the file already exists.
+    """
+    # NOTE(review): this uses a plain open() while the cluster variant uses
+    # write_new() -- presumably kept to match the pre-refactor behavior;
+    # confirm whether write_new() should be used here as well.
+    if not os.path.exists(ctx.logrotate_dir + '/cephadm'):
+        with open(ctx.logrotate_dir + '/cephadm', 'w') as f:
+            cephadm_logrotate_config = templating.render(
+                ctx, templating.Templates.cephadm_logrotate_config
+            )
+            f.write(cephadm_logrotate_config)
+
+
+def write_cluster_logrotate_config(ctx: CephadmContext, fsid: str) -> None:
+ # logrotate for the cluster
+ with write_new(ctx.logrotate_dir + f'/ceph-{fsid}', perms=None) as f:
+ """
+ See cephadm/cephadmlib/templates/cluster.logrotate.config.j2 to
+ get a better idea what this comment is referring to
+
+ This is a bit sloppy in that the killall/pkill will touch all ceph daemons
+ in all containers, but I don't see an elegant way to send SIGHUP *just* to
+ the daemons for this cluster. (1) systemd kill -s will get the signal to
+ podman, but podman will exit. (2) podman kill will get the signal to the
+ first child (bash), but that isn't the ceph daemon. This is simpler and
+ should be harmless.
+ """
+ targets: List[str] = [
+ 'ceph-mon',
+ 'ceph-mgr',
+ 'ceph-mds',
+ 'ceph-osd',
+ 'ceph-fuse',
+ 'radosgw',
+ 'rbd-mirror',
+ 'cephfs-mirror',
+ 'tcmu-runner',
+ ]
+
+ logrotate_config = templating.render(
+ ctx,
+ templating.Templates.cluster_logrotate_config,
+ fsid=fsid,
+ targets=targets,
+ )
+
+ f.write(logrotate_config)
--- /dev/null
+# created by cephadm
+/var/log/ceph/cephadm.log {
+ rotate 7
+ daily
+ compress
+ missingok
+ notifempty
+ su root root
+}
--- /dev/null
+# created by cephadm
+/var/log/ceph/{{ fsid }}/*.log {
+ rotate 7
+ daily
+ compress
+ sharedscripts
+ postrotate
+ killall -q -1 {{ targets|join(' ') }} || pkill -1 -x '{{ targets|join('|') }}' || true
+ endscript
+ missingok
+ notifempty
+ su root root
+}
ceph_service = 'ceph.service.j2'
agent_service = 'agent.service.j2'
+ cluster_logrotate_config = 'cluster.logrotate.config.j2'
+ cephadm_logrotate_config = 'cephadm.logrotate.config.j2'
def __str__(self) -> str:
return self.value
--- /dev/null
+from unittest import mock
+
+import pytest
+
+from tests.fixtures import import_cephadm, cephadm_fs
+
+from cephadmlib import logging
+
+
+_cephadm = import_cephadm()
+
+def test_cluster_logrotate_config(cephadm_fs):
+ ctx = _cephadm.CephadmContext()
+ ctx.logrotate_dir = '/my/log/dir'
+ fsid = '5dcc9af0-7cd3-11ee-9e84-525400babd0a'
+
+ cephadm_fs.create_dir(ctx.logrotate_dir)
+
+ expected_cluster_logrotate_file = """# created by cephadm
+/var/log/ceph/5dcc9af0-7cd3-11ee-9e84-525400babd0a/*.log {
+ rotate 7
+ daily
+ compress
+ sharedscripts
+ postrotate
+ killall -q -1 ceph-mon ceph-mgr ceph-mds ceph-osd ceph-fuse radosgw rbd-mirror cephfs-mirror tcmu-runner || pkill -1 -x 'ceph-mon|ceph-mgr|ceph-mds|ceph-osd|ceph-fuse|radosgw|rbd-mirror|cephfs-mirror|tcmu-runner' || true
+ endscript
+ missingok
+ notifempty
+ su root root
+}"""
+
+ logging.write_cluster_logrotate_config(ctx, fsid)
+
+ with open(ctx.logrotate_dir + f'/ceph-{fsid}', 'r') as f:
+ assert f.read() == expected_cluster_logrotate_file
+
+def test_cephadm_logrotate_config(cephadm_fs):
+ ctx = _cephadm.CephadmContext()
+ ctx.logrotate_dir = '/my/log/dir'
+
+ cephadm_fs.create_dir(ctx.logrotate_dir)
+
+ expected_cephadm_logrotate_file = """# created by cephadm
+/var/log/ceph/cephadm.log {
+ rotate 7
+ daily
+ compress
+ missingok
+ notifempty
+ su root root
+}"""
+
+ logging.write_cephadm_logrotate_config(ctx)
+
+ with open(ctx.logrotate_dir + f'/cephadm', 'r') as f:
+ assert f.read() == expected_cephadm_logrotate_file